Lines Matching +full:i2c +full:- +full:topology
26 #include <linux/i2c.h>
56 * protocol. The helpers contain a topology manager and bandwidth manager.
177 int idx = (lct / 2) - 1; in drm_dp_mst_get_ufp_num_at_lct_from_rad()
215 number_of_bits--; in drm_dp_msg_header_crc4()
219 bitshift--; in drm_dp_msg_header_crc4()
231 number_of_bits--; in drm_dp_msg_header_crc4()
249 number_of_bits--; in drm_dp_msg_data_crc4()
253 bitshift--; in drm_dp_msg_data_crc4()
265 number_of_bits--; in drm_dp_msg_data_crc4()
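
For reference, the checksum these fragments count down through is a bit-serial CRC-4 over nibbles. A minimal standalone userspace sketch, assuming the polynomial x^4 + x + 1 (0x13) and MSB-first bit order (drm_dp_msg_data_crc4() is the analogous 8-bit CRC over whole bytes):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the sideband header CRC-4: shift num_nibbles worth of bits
 * in MSB-first, reducing by 0x13 whenever a bit reaches position 4,
 * then flush four zero bits through the register. */
static uint8_t sb_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	size_t bits = num_nibbles * 4;
	uint8_t rem = 0;

	for (size_t i = 0; i < bits; i++) {
		rem = (rem << 1) | ((data[i / 8] >> (7 - (i % 8))) & 1);
		if (rem & 0x10)
			rem ^= 0x13;
	}
	for (int i = 0; i < 4; i++) {
		rem <<= 1;
		if (rem & 0x10)
			rem ^= 0x13;
	}
	return rem & 0xf;
}

int main(void)
{
	uint8_t hdr[3] = { 0x11, 0x06, 0x80 };	/* 5 nibbles of header */

	printf("crc4 = 0x%x\n", sb_header_crc4(hdr, 5));
	return 0;
}
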
277 size += (hdr->lct / 2); in drm_dp_calc_sb_hdr_size()
288 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); in drm_dp_encode_sideband_msg_hdr()
289 for (i = 0; i < (hdr->lct / 2); i++) in drm_dp_encode_sideband_msg_hdr()
290 buf[idx++] = hdr->rad[i]; in drm_dp_encode_sideband_msg_hdr()
291 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | in drm_dp_encode_sideband_msg_hdr()
292 (hdr->msg_len & 0x3f); in drm_dp_encode_sideband_msg_hdr()
293 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); in drm_dp_encode_sideband_msg_hdr()
295 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); in drm_dp_encode_sideband_msg_hdr()
296 buf[idx - 1] |= (crc4 & 0xf); in drm_dp_encode_sideband_msg_hdr()
316 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); in drm_dp_decode_sideband_msg_hdr()
318 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { in drm_dp_decode_sideband_msg_hdr()
319 drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); in drm_dp_decode_sideband_msg_hdr()
323 hdr->lct = (buf[0] & 0xf0) >> 4; in drm_dp_decode_sideband_msg_hdr()
324 hdr->lcr = (buf[0] & 0xf); in drm_dp_decode_sideband_msg_hdr()
326 for (i = 0; i < (hdr->lct / 2); i++) in drm_dp_decode_sideband_msg_hdr()
327 hdr->rad[i] = buf[idx++]; in drm_dp_decode_sideband_msg_hdr()
328 hdr->broadcast = (buf[idx] >> 7) & 0x1; in drm_dp_decode_sideband_msg_hdr()
329 hdr->path_msg = (buf[idx] >> 6) & 0x1; in drm_dp_decode_sideband_msg_hdr()
330 hdr->msg_len = buf[idx] & 0x3f; in drm_dp_decode_sideband_msg_hdr()
331 if (hdr->msg_len < 1) /* min space for body CRC */ in drm_dp_decode_sideband_msg_hdr()
335 hdr->somt = (buf[idx] >> 7) & 0x1; in drm_dp_decode_sideband_msg_hdr()
336 hdr->eomt = (buf[idx] >> 6) & 0x1; in drm_dp_decode_sideband_msg_hdr()
337 hdr->seqno = (buf[idx] >> 4) & 0x1; in drm_dp_decode_sideband_msg_hdr()
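
Taken together, the encode and decode fragments pin down the header layout. A hedged worked example for a directly attached branch (lct = 1, lcr = 0), where no RAD bytes are emitted because lct / 2 == 0:

/*
 * buf[0] = (lct & 0xf) << 4 | (lcr & 0xf)                    -> 0x10
 * buf[1] = broadcast << 7 | path_msg << 6 | (msg_len & 0x3f)
 * buf[2] = somt << 7 | eomt << 6 | seqno << 4 | crc4
 *
 * With idx == 3 the CRC-4 covers (idx * 2) - 1 = 5 nibbles and is
 * OR'd into the low nibble of the final byte, which the decoder
 * checks with the same (len * 2) - 1 nibble count.
 */
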
349 u8 *buf = raw->msg; in drm_dp_encode_sideband_req()
351 buf[idx++] = req->req_type & 0x7f; in drm_dp_encode_sideband_req()
353 switch (req->req_type) { in drm_dp_encode_sideband_req()
357 buf[idx] = (req->u.port_num.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
361 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | in drm_dp_encode_sideband_req()
362 (req->u.allocate_payload.number_sdp_streams & 0xf); in drm_dp_encode_sideband_req()
364 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); in drm_dp_encode_sideband_req()
366 buf[idx] = (req->u.allocate_payload.pbn >> 8); in drm_dp_encode_sideband_req()
368 buf[idx] = (req->u.allocate_payload.pbn & 0xff); in drm_dp_encode_sideband_req()
370 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { in drm_dp_encode_sideband_req()
371 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | in drm_dp_encode_sideband_req()
372 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); in drm_dp_encode_sideband_req()
375 if (req->u.allocate_payload.number_sdp_streams & 1) { in drm_dp_encode_sideband_req()
376 i = req->u.allocate_payload.number_sdp_streams - 1; in drm_dp_encode_sideband_req()
377 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; in drm_dp_encode_sideband_req()
382 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
384 buf[idx] = (req->u.query_payload.vcpi & 0x7f); in drm_dp_encode_sideband_req()
388 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
389 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; in drm_dp_encode_sideband_req()
391 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; in drm_dp_encode_sideband_req()
393 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); in drm_dp_encode_sideband_req()
395 buf[idx] = (req->u.dpcd_read.num_bytes); in drm_dp_encode_sideband_req()
400 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
401 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; in drm_dp_encode_sideband_req()
403 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; in drm_dp_encode_sideband_req()
405 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); in drm_dp_encode_sideband_req()
407 buf[idx] = (req->u.dpcd_write.num_bytes); in drm_dp_encode_sideband_req()
409 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); in drm_dp_encode_sideband_req()
410 idx += req->u.dpcd_write.num_bytes; in drm_dp_encode_sideband_req()
413 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
414 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); in drm_dp_encode_sideband_req()
416 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { in drm_dp_encode_sideband_req()
417 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; in drm_dp_encode_sideband_req()
419 buf[idx] = req->u.i2c_read.transactions[i].num_bytes; in drm_dp_encode_sideband_req()
421 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); in drm_dp_encode_sideband_req()
422 idx += req->u.i2c_read.transactions[i].num_bytes; in drm_dp_encode_sideband_req()
424 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; in drm_dp_encode_sideband_req()
425 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); in drm_dp_encode_sideband_req()
428 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; in drm_dp_encode_sideband_req()
430 buf[idx] = (req->u.i2c_read.num_bytes_read); in drm_dp_encode_sideband_req()
435 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
437 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; in drm_dp_encode_sideband_req()
439 buf[idx] = (req->u.i2c_write.num_bytes); in drm_dp_encode_sideband_req()
441 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); in drm_dp_encode_sideband_req()
442 idx += req->u.i2c_write.num_bytes; in drm_dp_encode_sideband_req()
447 msg = &req->u.enc_status; in drm_dp_encode_sideband_req()
448 buf[idx] = msg->stream_id; in drm_dp_encode_sideband_req()
450 memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id)); in drm_dp_encode_sideband_req()
451 idx += sizeof(msg->client_id); in drm_dp_encode_sideband_req()
453 buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event); in drm_dp_encode_sideband_req()
454 buf[idx] |= msg->valid_stream_event ? BIT(2) : 0; in drm_dp_encode_sideband_req()
455 buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior); in drm_dp_encode_sideband_req()
456 buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0; in drm_dp_encode_sideband_req()
461 raw->cur_len = idx; in drm_dp_encode_sideband_req()
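
As an illustration of the packing above, a minimal standalone sketch that lays out a REMOTE_DPCD_READ body the same way: a 4-bit port number sharing a byte with DPCD address bits 19:16, then the low address bytes and a count. The struct and the 0x20 request-type value are assumptions mirroring the kernel definitions, not the in-tree code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct dpcd_read_req {		/* hypothetical mirror of the union member */
	uint8_t port_number;
	uint32_t dpcd_address;	/* 20-bit DPCD address */
	uint8_t num_bytes;
};

static size_t encode_dpcd_read(uint8_t *buf, const struct dpcd_read_req *r)
{
	size_t idx = 0;

	buf[idx++] = 0x20 & 0x7f;			/* DP_REMOTE_DPCD_READ */
	buf[idx] = (r->port_number & 0xf) << 4;		/* port, high nibble */
	buf[idx++] |= (r->dpcd_address >> 16) & 0xf;	/* address bits 19:16 */
	buf[idx++] = (r->dpcd_address >> 8) & 0xff;	/* address bits 15:8 */
	buf[idx++] = r->dpcd_address & 0xff;		/* address bits 7:0 */
	buf[idx++] = r->num_bytes;
	return idx;
}

int main(void)
{
	struct dpcd_read_req r = {
		.port_number = 1, .dpcd_address = 0x02000, .num_bytes = 16,
	};
	uint8_t buf[8];
	size_t i, len = encode_dpcd_read(buf, &r);

	for (i = 0; i < len; i++)
		printf("%02x ", buf[i]);	/* 20 10 20 00 10 */
	printf("\n");
	return 0;
}
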
470 const u8 *buf = raw->msg; in drm_dp_decode_sideband_req()
473 req->req_type = buf[idx++] & 0x7f; in drm_dp_decode_sideband_req()
474 switch (req->req_type) { in drm_dp_decode_sideband_req()
478 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
483 &req->u.allocate_payload; in drm_dp_decode_sideband_req()
485 a->number_sdp_streams = buf[idx] & 0xf; in drm_dp_decode_sideband_req()
486 a->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
489 a->vcpi = buf[idx] & 0x7f; in drm_dp_decode_sideband_req()
491 a->pbn = buf[++idx] << 8; in drm_dp_decode_sideband_req()
492 a->pbn |= buf[++idx]; in drm_dp_decode_sideband_req()
495 for (i = 0; i < a->number_sdp_streams; i++) { in drm_dp_decode_sideband_req()
496 a->sdp_stream_sink[i] = in drm_dp_decode_sideband_req()
502 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
504 req->u.query_payload.vcpi = buf[idx] & 0x7f; in drm_dp_decode_sideband_req()
508 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read; in drm_dp_decode_sideband_req()
510 r->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
512 r->dpcd_address = (buf[idx] << 16) & 0xf0000; in drm_dp_decode_sideband_req()
513 r->dpcd_address |= (buf[++idx] << 8) & 0xff00; in drm_dp_decode_sideband_req()
514 r->dpcd_address |= buf[++idx] & 0xff; in drm_dp_decode_sideband_req()
516 r->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
522 &req->u.dpcd_write; in drm_dp_decode_sideband_req()
524 w->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
526 w->dpcd_address = (buf[idx] << 16) & 0xf0000; in drm_dp_decode_sideband_req()
527 w->dpcd_address |= (buf[++idx] << 8) & 0xff00; in drm_dp_decode_sideband_req()
528 w->dpcd_address |= buf[++idx] & 0xff; in drm_dp_decode_sideband_req()
530 w->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
532 w->bytes = kmemdup(&buf[++idx], w->num_bytes, in drm_dp_decode_sideband_req()
534 if (!w->bytes) in drm_dp_decode_sideband_req()
535 return -ENOMEM; in drm_dp_decode_sideband_req()
540 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read; in drm_dp_decode_sideband_req()
544 r->num_transactions = buf[idx] & 0x3; in drm_dp_decode_sideband_req()
545 r->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
546 for (i = 0; i < r->num_transactions; i++) { in drm_dp_decode_sideband_req()
547 tx = &r->transactions[i]; in drm_dp_decode_sideband_req()
549 tx->i2c_dev_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
550 tx->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
551 tx->bytes = kmemdup(&buf[++idx], in drm_dp_decode_sideband_req()
552 tx->num_bytes, in drm_dp_decode_sideband_req()
554 if (!tx->bytes) { in drm_dp_decode_sideband_req()
558 idx += tx->num_bytes; in drm_dp_decode_sideband_req()
559 tx->no_stop_bit = (buf[idx] >> 5) & 0x1; in drm_dp_decode_sideband_req()
560 tx->i2c_transaction_delay = buf[idx] & 0xf; in drm_dp_decode_sideband_req()
564 for (i = 0; i < r->num_transactions; i++) { in drm_dp_decode_sideband_req()
565 tx = &r->transactions[i]; in drm_dp_decode_sideband_req()
566 kfree(tx->bytes); in drm_dp_decode_sideband_req()
568 return -ENOMEM; in drm_dp_decode_sideband_req()
571 r->read_i2c_device_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
572 r->num_bytes_read = buf[++idx]; in drm_dp_decode_sideband_req()
577 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write; in drm_dp_decode_sideband_req()
579 w->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
580 w->write_i2c_device_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
581 w->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
582 w->bytes = kmemdup(&buf[++idx], w->num_bytes, in drm_dp_decode_sideband_req()
584 if (!w->bytes) in drm_dp_decode_sideband_req()
585 return -ENOMEM; in drm_dp_decode_sideband_req()
589 req->u.enc_status.stream_id = buf[idx++]; in drm_dp_decode_sideband_req()
590 for (i = 0; i < sizeof(req->u.enc_status.client_id); i++) in drm_dp_decode_sideband_req()
591 req->u.enc_status.client_id[i] = buf[idx++]; in drm_dp_decode_sideband_req()
593 req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0), in drm_dp_decode_sideband_req()
595 req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2), in drm_dp_decode_sideband_req()
597 req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3), in drm_dp_decode_sideband_req()
599 req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5), in drm_dp_decode_sideband_req()
615 if (req->req_type == DP_LINK_ADDRESS) { in drm_dp_dump_sideband_msg_req_body()
617 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type)); in drm_dp_dump_sideband_msg_req_body()
621 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type)); in drm_dp_dump_sideband_msg_req_body()
624 switch (req->req_type) { in drm_dp_dump_sideband_msg_req_body()
628 P("port=%d\n", req->u.port_num.port_number); in drm_dp_dump_sideband_msg_req_body()
632 req->u.allocate_payload.port_number, in drm_dp_dump_sideband_msg_req_body()
633 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn, in drm_dp_dump_sideband_msg_req_body()
634 req->u.allocate_payload.number_sdp_streams, in drm_dp_dump_sideband_msg_req_body()
635 req->u.allocate_payload.number_sdp_streams, in drm_dp_dump_sideband_msg_req_body()
636 req->u.allocate_payload.sdp_stream_sink); in drm_dp_dump_sideband_msg_req_body()
640 req->u.query_payload.port_number, in drm_dp_dump_sideband_msg_req_body()
641 req->u.query_payload.vcpi); in drm_dp_dump_sideband_msg_req_body()
645 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address, in drm_dp_dump_sideband_msg_req_body()
646 req->u.dpcd_read.num_bytes); in drm_dp_dump_sideband_msg_req_body()
650 req->u.dpcd_write.port_number, in drm_dp_dump_sideband_msg_req_body()
651 req->u.dpcd_write.dpcd_address, in drm_dp_dump_sideband_msg_req_body()
652 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes, in drm_dp_dump_sideband_msg_req_body()
653 req->u.dpcd_write.bytes); in drm_dp_dump_sideband_msg_req_body()
657 req->u.i2c_read.port_number, in drm_dp_dump_sideband_msg_req_body()
658 req->u.i2c_read.num_transactions, in drm_dp_dump_sideband_msg_req_body()
659 req->u.i2c_read.read_i2c_device_id, in drm_dp_dump_sideband_msg_req_body()
660 req->u.i2c_read.num_bytes_read); in drm_dp_dump_sideband_msg_req_body()
663 for (i = 0; i < req->u.i2c_read.num_transactions; i++) { in drm_dp_dump_sideband_msg_req_body()
665 &req->u.i2c_read.transactions[i]; in drm_dp_dump_sideband_msg_req_body()
668 i, rtx->i2c_dev_id, rtx->num_bytes, in drm_dp_dump_sideband_msg_req_body()
669 rtx->no_stop_bit, rtx->i2c_transaction_delay, in drm_dp_dump_sideband_msg_req_body()
670 rtx->num_bytes, rtx->bytes); in drm_dp_dump_sideband_msg_req_body()
675 req->u.i2c_write.port_number, in drm_dp_dump_sideband_msg_req_body()
676 req->u.i2c_write.write_i2c_device_id, in drm_dp_dump_sideband_msg_req_body()
677 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes, in drm_dp_dump_sideband_msg_req_body()
678 req->u.i2c_write.bytes); in drm_dp_dump_sideband_msg_req_body()
683 req->u.enc_status.stream_id, in drm_dp_dump_sideband_msg_req_body()
684 (int)ARRAY_SIZE(req->u.enc_status.client_id), in drm_dp_dump_sideband_msg_req_body()
685 req->u.enc_status.client_id, req->u.enc_status.stream_event, in drm_dp_dump_sideband_msg_req_body()
686 req->u.enc_status.valid_stream_event, in drm_dp_dump_sideband_msg_req_body()
687 req->u.enc_status.stream_behavior, in drm_dp_dump_sideband_msg_req_body()
688 req->u.enc_status.valid_stream_behavior); in drm_dp_dump_sideband_msg_req_body()
707 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf, in drm_dp_mst_dump_sideband_msg_tx()
710 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno, in drm_dp_mst_dump_sideband_msg_tx()
711 drm_dp_mst_sideband_tx_state_str(txmsg->state), in drm_dp_mst_dump_sideband_msg_tx()
712 txmsg->path_msg, buf); in drm_dp_mst_dump_sideband_msg_tx()
747 u8 *buf = raw->msg; in drm_dp_encode_sideband_reply()
749 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); in drm_dp_encode_sideband_reply()
751 raw->cur_len = idx; in drm_dp_encode_sideband_reply()
759 * ignore out-of-order messages or messages that are part of a in drm_dp_sideband_msg_set_header()
762 if (!hdr->somt && !msg->have_somt) in drm_dp_sideband_msg_set_header()
766 msg->curchunk_idx = 0; in drm_dp_sideband_msg_set_header()
767 msg->curchunk_len = hdr->msg_len; in drm_dp_sideband_msg_set_header()
768 msg->curchunk_hdrlen = hdrlen; in drm_dp_sideband_msg_set_header()
770 /* we have already gotten an somt - don't bother parsing */ in drm_dp_sideband_msg_set_header()
771 if (hdr->somt && msg->have_somt) in drm_dp_sideband_msg_set_header()
774 if (hdr->somt) { in drm_dp_sideband_msg_set_header()
775 memcpy(&msg->initial_hdr, hdr, in drm_dp_sideband_msg_set_header()
777 msg->have_somt = true; in drm_dp_sideband_msg_set_header()
779 if (hdr->eomt) in drm_dp_sideband_msg_set_header()
780 msg->have_eomt = true; in drm_dp_sideband_msg_set_header()
791 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); in drm_dp_sideband_append_payload()
792 msg->curchunk_idx += replybuflen; in drm_dp_sideband_append_payload()
794 if (msg->curchunk_idx >= msg->curchunk_len) { in drm_dp_sideband_append_payload()
796 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); in drm_dp_sideband_append_payload()
797 if (crc4 != msg->chunk[msg->curchunk_len - 1]) in drm_dp_sideband_append_payload()
800 msg->chunk, msg->curchunk_len, false); in drm_dp_sideband_append_payload()
802 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); in drm_dp_sideband_append_payload()
803 msg->curlen += msg->curchunk_len - 1; in drm_dp_sideband_append_payload()
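
The append path accumulates reply bytes until a full chunk (hdr->msg_len) has arrived, verifies the trailing data CRC, and copies everything except that CRC byte into the assembled message. A hedged worked example:

/*
 * Suppose hdr->msg_len = 12: that is 11 payload bytes plus one data
 * CRC. Once curchunk_idx reaches 12:
 *
 *   crc4 = drm_dp_msg_data_crc4(chunk, 11);   -- CRC over the payload
 *   crc4 == chunk[11]?                        -- trailing byte must match
 *   copy chunk[0..10] into msg, curlen += 11
 *
 * SOMT marks the first chunk of a message and EOMT the last, so bodies
 * longer than one chunk are stitched together across transfers.
 */
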
815 import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]); in drm_dp_sideband_parse_link_address()
817 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_link_address()
819 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
821 for (i = 0; i < repmsg->u.link_addr.nports; i++) { in drm_dp_sideband_parse_link_address()
822 if (raw->msg[idx] & 0x80) in drm_dp_sideband_parse_link_address()
823 repmsg->u.link_addr.ports[i].input_port = 1; in drm_dp_sideband_parse_link_address()
825 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; in drm_dp_sideband_parse_link_address()
826 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_link_address()
829 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
831 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; in drm_dp_sideband_parse_link_address()
832 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; in drm_dp_sideband_parse_link_address()
833 if (repmsg->u.link_addr.ports[i].input_port == 0) in drm_dp_sideband_parse_link_address()
834 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; in drm_dp_sideband_parse_link_address()
836 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
838 if (repmsg->u.link_addr.ports[i].input_port == 0) { in drm_dp_sideband_parse_link_address()
839 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); in drm_dp_sideband_parse_link_address()
841 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
843 import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]); in drm_dp_sideband_parse_link_address()
845 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
847 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_link_address()
848 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_link_address()
852 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
858 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_link_address()
867 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_remote_dpcd_read()
869 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_read()
871 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; in drm_dp_sideband_parse_remote_dpcd_read()
873 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_read()
876 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); in drm_dp_sideband_parse_remote_dpcd_read()
879 DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_dpcd_read()
888 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_remote_dpcd_write()
890 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_write()
894 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_dpcd_write()
903 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_remote_i2c_read_ack()
905 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_i2c_read_ack()
907 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; in drm_dp_sideband_parse_remote_i2c_read_ack()
910 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); in drm_dp_sideband_parse_remote_i2c_read_ack()
913 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_i2c_read_ack()
922 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_enum_path_resources_ack()
923 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; in drm_dp_sideband_parse_enum_path_resources_ack()
925 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
927 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_enum_path_resources_ack()
929 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
931 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_enum_path_resources_ack()
933 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
937 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_enum_path_resources_ack()
946 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_allocate_payload_ack()
948 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
950 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; in drm_dp_sideband_parse_allocate_payload_ack()
952 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
954 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_allocate_payload_ack()
956 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
960 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_allocate_payload_ack()
969 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_query_payload_ack()
971 if (idx > raw->curlen) in drm_dp_sideband_parse_query_payload_ack()
973 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); in drm_dp_sideband_parse_query_payload_ack()
975 if (idx > raw->curlen) in drm_dp_sideband_parse_query_payload_ack()
979 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_query_payload_ack()
988 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_power_updown_phy_ack()
990 if (idx > raw->curlen) { in drm_dp_sideband_parse_power_updown_phy_ack()
992 idx, raw->curlen); in drm_dp_sideband_parse_power_updown_phy_ack()
1005 reply = &repmsg->u.enc_status; in drm_dp_sideband_parse_query_stream_enc_status()
1007 reply->stream_id = raw->msg[3]; in drm_dp_sideband_parse_query_stream_enc_status()
1009 reply->reply_signed = raw->msg[2] & BIT(0); in drm_dp_sideband_parse_query_stream_enc_status()
1019 reply->hdcp_1x_device_present = raw->msg[2] & BIT(4); in drm_dp_sideband_parse_query_stream_enc_status()
1020 reply->hdcp_2x_device_present = raw->msg[2] & BIT(3); in drm_dp_sideband_parse_query_stream_enc_status()
1022 reply->query_capable_device_present = raw->msg[2] & BIT(5); in drm_dp_sideband_parse_query_stream_enc_status()
1023 reply->legacy_device_present = raw->msg[2] & BIT(6); in drm_dp_sideband_parse_query_stream_enc_status()
1024 reply->unauthorizable_device_present = raw->msg[2] & BIT(7); in drm_dp_sideband_parse_query_stream_enc_status()
1026 reply->auth_completed = !!(raw->msg[1] & BIT(3)); in drm_dp_sideband_parse_query_stream_enc_status()
1027 reply->encryption_enabled = !!(raw->msg[1] & BIT(4)); in drm_dp_sideband_parse_query_stream_enc_status()
1028 reply->repeater_present = !!(raw->msg[1] & BIT(5)); in drm_dp_sideband_parse_query_stream_enc_status()
1029 reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6; in drm_dp_sideband_parse_query_stream_enc_status()
1039 msg->reply_type = (raw->msg[0] & 0x80) >> 7; in drm_dp_sideband_parse_reply()
1040 msg->req_type = (raw->msg[0] & 0x7f); in drm_dp_sideband_parse_reply()
1042 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_sideband_parse_reply()
1043 import_guid(&msg->u.nak.guid, &raw->msg[1]); in drm_dp_sideband_parse_reply()
1044 msg->u.nak.reason = raw->msg[17]; in drm_dp_sideband_parse_reply()
1045 msg->u.nak.nak_data = raw->msg[18]; in drm_dp_sideband_parse_reply()
1049 switch (msg->req_type) { in drm_dp_sideband_parse_reply()
1074 drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n", in drm_dp_sideband_parse_reply()
1075 msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); in drm_dp_sideband_parse_reply()
1087 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; in drm_dp_sideband_parse_connection_status_notify()
1089 if (idx > raw->curlen) in drm_dp_sideband_parse_connection_status_notify()
1092 import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]); in drm_dp_sideband_parse_connection_status_notify()
1094 if (idx > raw->curlen) in drm_dp_sideband_parse_connection_status_notify()
1097 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1098 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1099 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1100 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1101 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); in drm_dp_sideband_parse_connection_status_notify()
1105 drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n", in drm_dp_sideband_parse_connection_status_notify()
1106 idx, raw->curlen); in drm_dp_sideband_parse_connection_status_notify()
1116 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; in drm_dp_sideband_parse_resource_status_notify()
1118 if (idx > raw->curlen) in drm_dp_sideband_parse_resource_status_notify()
1121 import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]); in drm_dp_sideband_parse_resource_status_notify()
1123 if (idx > raw->curlen) in drm_dp_sideband_parse_resource_status_notify()
1126 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); in drm_dp_sideband_parse_resource_status_notify()
1130 drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_resource_status_notify()
1139 msg->req_type = (raw->msg[0] & 0x7f); in drm_dp_sideband_parse_req()
1141 switch (msg->req_type) { in drm_dp_sideband_parse_req()
1147 drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n", in drm_dp_sideband_parse_req()
1148 msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); in drm_dp_sideband_parse_req()
1180 msg->path_msg = true; in build_clear_payload_id_table()
1191 msg->path_msg = true; in build_enum_path_resources()
1212 msg->path_msg = true; in build_allocate_payload()
1227 msg->path_msg = true; in build_power_updown_phy()
1255 * All updates to txmsg->state are protected by mgr->qlock, and the two in check_txmsg_state()
1259 state = READ_ONCE(txmsg->state); in check_txmsg_state()
1267 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_wait_tx_reply()
1275 * poll-waiting for the MST reply interrupt if we didn't receive in drm_dp_mst_wait_tx_reply()
1279 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason in drm_dp_mst_wait_tx_reply()
1286 ret = wait_event_timeout(mgr->tx_waitq, in drm_dp_mst_wait_tx_reply()
1288 mgr->cbs->poll_hpd_irq ? in drm_dp_mst_wait_tx_reply()
1292 if (ret || !mgr->cbs->poll_hpd_irq || in drm_dp_mst_wait_tx_reply()
1296 mgr->cbs->poll_hpd_irq(mgr); in drm_dp_mst_wait_tx_reply()
1299 mutex_lock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
1301 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { in drm_dp_mst_wait_tx_reply()
1302 ret = -EIO; in drm_dp_mst_wait_tx_reply()
1306 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n", in drm_dp_mst_wait_tx_reply()
1307 txmsg, txmsg->state, txmsg->seqno); in drm_dp_mst_wait_tx_reply()
1310 ret = -EIO; in drm_dp_mst_wait_tx_reply()
1313 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || in drm_dp_mst_wait_tx_reply()
1314 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_wait_tx_reply()
1315 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in drm_dp_mst_wait_tx_reply()
1316 list_del(&txmsg->next); in drm_dp_mst_wait_tx_reply()
1319 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { in drm_dp_mst_wait_tx_reply()
1320 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP, in drm_dp_mst_wait_tx_reply()
1325 mutex_unlock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
1339 mstb->lct = lct; in drm_dp_add_mst_branch_device()
1341 memcpy(mstb->rad, rad, lct / 2); in drm_dp_add_mst_branch_device()
1342 INIT_LIST_HEAD(&mstb->ports); in drm_dp_add_mst_branch_device()
1343 kref_init(&mstb->topology_kref); in drm_dp_add_mst_branch_device()
1344 kref_init(&mstb->malloc_kref); in drm_dp_add_mst_branch_device()
1353 if (mstb->port_parent) in drm_dp_free_mst_branch_device()
1354 drm_dp_mst_put_port_malloc(mstb->port_parent); in drm_dp_free_mst_branch_device()
1362 * Topology refcount overview
1367 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1369 * Topology refcounts are not exposed to drivers, and are handled internally
1371 * in-memory topology state from being changed in the middle of critical
1374 * of the topology until its topology refcount reaches zero. Additionally,
1382 * drm_dp_mst_branch allocated even after all of its topology references have
1384 * branch's last known state before it was disconnected from the topology.
1392 * helpers. Exposing this API to drivers in a race-free manner would take more
1396 * Refcount relationships in a topology
1399 * Let's take a look at why the relationship between topology and malloc
1402 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1404 * An example of topology and malloc refs in a DP MST topology with two
1405 * active payloads. Topology refcount increments are indicated by solid
1411 * As you can see in the above figure, every branch increments the topology
1418 * topology would start to look like the figure below.
1420 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1425 * Whenever a port or branch device's topology refcount reaches zero, it will
1426 * decrement the topology refcounts of all its children, the malloc refcount
1428 * #4, this means they both have been disconnected from the topology and freed
1430 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1438 * connected to the topology. In this case, we would travel up the topology as
1441 * .. kernel-figure:: dp-mst/topology-figure-3.dot
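
For driver code, the practical upshot of the overview above is the malloc-reference pattern: a driver that stashes a port pointer in its own structures must pin the memory with a malloc reference, since topology references are internal to the helpers. A minimal sketch of that pattern, using the exported malloc-ref API; the encoder struct is hypothetical:

#include <drm/display/drm_dp_mst_helper.h>

struct my_mst_encoder {			/* hypothetical driver state */
	struct drm_dp_mst_port *port;
};

static void my_encoder_bind(struct my_mst_encoder *enc,
			    struct drm_dp_mst_port *port)
{
	enc->port = port;
	/* Memory stays valid even if the port is later unplugged. */
	drm_dp_mst_get_port_malloc(port);
}

static void my_encoder_unbind(struct my_mst_encoder *enc)
{
	/* May free the port if this was the last malloc reference. */
	drm_dp_mst_put_port_malloc(enc->port);
	enc->port = NULL;
}
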
1448 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1461 kref_get(&mstb->malloc_kref); in drm_dp_mst_get_mstb_malloc()
1462 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); in drm_dp_mst_get_mstb_malloc()
1466 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1479 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); in drm_dp_mst_put_mstb_malloc()
1480 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device); in drm_dp_mst_put_mstb_malloc()
1488 drm_dp_mst_put_mstb_malloc(port->parent); in drm_dp_free_mst_port()
1493 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1512 kref_get(&port->malloc_kref); in drm_dp_mst_get_port_malloc()
1513 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref)); in drm_dp_mst_get_port_malloc()
1518 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1530 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); in drm_dp_mst_put_port_malloc()
1531 kref_put(&port->malloc_kref, drm_dp_free_mst_port); in drm_dp_mst_put_port_malloc()
1556 for (i = 0; i < history->len; i++) { in __topology_ref_save()
1557 if (history->entries[i].backtrace == backtrace) { in __topology_ref_save()
1558 entry = &history->entries[i]; in __topology_ref_save()
1566 int new_len = history->len + 1; in __topology_ref_save()
1568 new = krealloc(history->entries, sizeof(*new) * new_len, in __topology_ref_save()
1573 entry = &new[history->len]; in __topology_ref_save()
1574 history->len = new_len; in __topology_ref_save()
1575 history->entries = new; in __topology_ref_save()
1577 entry->backtrace = backtrace; in __topology_ref_save()
1578 entry->type = type; in __topology_ref_save()
1579 entry->count = 0; in __topology_ref_save()
1581 entry->count++; in __topology_ref_save()
1582 entry->ts_nsec = ktime_get_ns(); in __topology_ref_save()
1590 if (entry_a->ts_nsec > entry_b->ts_nsec) in topology_ref_history_cmp()
1592 else if (entry_a->ts_nsec < entry_b->ts_nsec) in topology_ref_history_cmp()
1593 return -1; in topology_ref_history_cmp()
1619 if (!history->len) in __dump_topology_ref_history()
1625 sort(history->entries, history->len, sizeof(*history->entries), in __dump_topology_ref_history()
1628 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", in __dump_topology_ref_history()
1631 for (i = 0; i < history->len; i++) { in __dump_topology_ref_history()
1633 &history->entries[i]; in __dump_topology_ref_history()
1634 u64 ts_nsec = entry->ts_nsec; in __dump_topology_ref_history()
1637 stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4); in __dump_topology_ref_history()
1640 entry->count, in __dump_topology_ref_history()
1641 topology_ref_type_to_str(entry->type), in __dump_topology_ref_history()
1646 kfree(history->entries); in __dump_topology_ref_history()
1654 __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history, in drm_dp_mst_dump_mstb_topology_history()
1661 __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history, in drm_dp_mst_dump_port_topology_history()
1669 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); in save_mstb_topology_ref()
1676 __topology_ref_save(port->mgr, &port->topology_ref_history, type); in save_port_topology_ref()
1682 mutex_lock(&mgr->topology_ref_history_lock); in topology_ref_history_lock()
1688 mutex_unlock(&mgr->topology_ref_history_lock); in topology_ref_history_unlock()
1709 list_for_each_entry(payload, &state->payloads, next) in drm_atomic_get_mst_payload_state()
1710 if (payload->port == port) in drm_atomic_get_mst_payload_state()
1721 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_destroy_mst_branch_device()
1725 INIT_LIST_HEAD(&mstb->destroy_next); in drm_dp_destroy_mst_branch_device()
1728 * This can get called under mgr->mutex, so we need to perform the in drm_dp_destroy_mst_branch_device()
1731 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_destroy_mst_branch_device()
1732 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); in drm_dp_destroy_mst_branch_device()
1733 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_destroy_mst_branch_device()
1734 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); in drm_dp_destroy_mst_branch_device()
1738 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1740 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1742 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1743 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1744 * reached 0). Holding a topology reference implies that a malloc reference
1745 * will be held to @mstb as long as the user holds the topology reference.
1748 * reference to @mstb. If you already have a topology reference to @mstb, you
1756 * * 1: A topology reference was grabbed successfully
1757 * * 0: @port is no longer in the topology, no reference was grabbed
1764 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_try_get_mstb()
1765 ret = kref_get_unless_zero(&mstb->topology_kref); in drm_dp_mst_topology_try_get_mstb()
1767 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); in drm_dp_mst_topology_try_get_mstb()
1771 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_try_get_mstb()
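
Internally this is the usual kref_get_unless_zero() idiom. A sketch of how code in this file consumes it (illustrative only; the try-get helpers are local to the topology manager, not driver API):

static int walk_branch_example(struct drm_dp_mst_branch *mstb)
{
	/* Fails once the branch has left the topology; the memory may
	 * still be valid if a malloc reference is held elsewhere. */
	if (!drm_dp_mst_topology_try_get_mstb(mstb))
		return -ENODEV;

	/* Safe to walk mstb->ports etc. until the reference is dropped. */

	drm_dp_mst_topology_put_mstb(mstb);
	return 0;
}
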
1777 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1779 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1783 * you are already guaranteed to have at least one active topology reference
1792 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_get_mstb()
1795 WARN_ON(kref_read(&mstb->topology_kref) == 0); in drm_dp_mst_topology_get_mstb()
1796 kref_get(&mstb->topology_kref); in drm_dp_mst_topology_get_mstb()
1797 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); in drm_dp_mst_topology_get_mstb()
1799 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_get_mstb()
1803 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1805 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1807 * Releases a topology reference from @mstb by decrementing
1817 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_put_mstb()
1819 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); in drm_dp_mst_topology_put_mstb()
1822 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_put_mstb()
1823 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); in drm_dp_mst_topology_put_mstb()
1830 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_destroy_port()
1835 if (port->input) { in drm_dp_destroy_port()
1840 drm_edid_free(port->cached_edid); in drm_dp_destroy_port()
1846 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_destroy_port()
1847 list_add(&port->next, &mgr->destroy_port_list); in drm_dp_destroy_port()
1848 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_destroy_port()
1849 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); in drm_dp_destroy_port()
1853 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1855 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1857 * Attempts to grab a topology reference to @port, if it hasn't yet been
1858 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1859 * 0). Holding a topology reference implies that a malloc reference will be
1860 * held to @port as long as the user holds the topology reference.
1863 * reference to @port. If you already have a topology reference to @port, you
1871 * * 1: A topology reference was grabbed successfully
1872 * * 0: @port is no longer in the topology, no reference was grabbed
1879 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_try_get_port()
1880 ret = kref_get_unless_zero(&port->topology_kref); in drm_dp_mst_topology_try_get_port()
1882 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); in drm_dp_mst_topology_try_get_port()
1886 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_try_get_port()
1891 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1892 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1896 * you are already guaranteed to have at least one active topology reference
1905 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_get_port()
1907 WARN_ON(kref_read(&port->topology_kref) == 0); in drm_dp_mst_topology_get_port()
1908 kref_get(&port->topology_kref); in drm_dp_mst_topology_get_port()
1909 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); in drm_dp_mst_topology_get_port()
1912 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_get_port()
1916 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1917 * @port: The &struct drm_dp_mst_port to release the topology reference from
1919 * Releases a topology reference from @port by decrementing
1928 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_put_port()
1930 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); in drm_dp_mst_topology_put_port()
1933 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_put_port()
1934 kref_put(&port->topology_kref, drm_dp_destroy_port); in drm_dp_mst_topology_put_port()
1947 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_topology_get_mstb_validated_locked()
1948 if (port->mstb) { in drm_dp_mst_topology_get_mstb_validated_locked()
1950 port->mstb, to_find); in drm_dp_mst_topology_get_mstb_validated_locked()
1964 mutex_lock(&mgr->lock); in drm_dp_mst_topology_get_mstb_validated()
1965 if (mgr->mst_primary) { in drm_dp_mst_topology_get_mstb_validated()
1967 mgr->mst_primary, mstb); in drm_dp_mst_topology_get_mstb_validated()
1972 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_get_mstb_validated()
1982 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_topology_get_port_validated_locked()
1986 if (port->mstb) { in drm_dp_mst_topology_get_port_validated_locked()
1988 port->mstb, to_find); in drm_dp_mst_topology_get_port_validated_locked()
2002 mutex_lock(&mgr->lock); in drm_dp_mst_topology_get_port_validated()
2003 if (mgr->mst_primary) { in drm_dp_mst_topology_get_port_validated()
2005 mgr->mst_primary, port); in drm_dp_mst_topology_get_port_validated()
2010 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_get_port_validated()
2019 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_get_port()
2020 if (port->port_num == port_num) { in drm_dp_get_port()
2037 int parent_lct = port->parent->lct; in drm_dp_calculate_rad()
2039 int idx = (parent_lct - 1) / 2; in drm_dp_calculate_rad()
2042 memcpy(rad, port->parent->rad, idx + 1); in drm_dp_calculate_rad()
2047 rad[idx] |= port->port_num << shift; in drm_dp_calculate_rad()
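
The RAD (Relative Address) packs one 4-bit port number per hop, two hops per byte. A hedged worked example of how it grows:

/*
 * The primary branch (lct = 1) has a branch on port 1, which in turn
 * has a branch on port 8 (hypothetical numbers):
 *
 *   hop 1: parent_lct = 1 -> rad[0] = 0x10 (port 1, high nibble), lct = 2
 *   hop 2: parent_lct = 2 -> rad[0] |= 0x08 (port 8, low nibble), lct = 3
 *
 * Each byte holds two hops, which is why sideband headers carry
 * lct / 2 RAD bytes.
 */
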
2071 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_port_set_pdt()
2076 if (port->pdt == new_pdt && port->mcs == new_mcs) in drm_dp_port_set_pdt()
2080 if (port->pdt != DP_PEER_DEVICE_NONE) { in drm_dp_port_set_pdt()
2081 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_port_set_pdt()
2083 * If the new PDT would also have an i2c bus, in drm_dp_port_set_pdt()
2088 port->pdt = new_pdt; in drm_dp_port_set_pdt()
2089 port->mcs = new_mcs; in drm_dp_port_set_pdt()
2093 /* remove i2c over sideband */ in drm_dp_port_set_pdt()
2096 mutex_lock(&mgr->lock); in drm_dp_port_set_pdt()
2097 drm_dp_mst_topology_put_mstb(port->mstb); in drm_dp_port_set_pdt()
2098 port->mstb = NULL; in drm_dp_port_set_pdt()
2099 mutex_unlock(&mgr->lock); in drm_dp_port_set_pdt()
2103 port->pdt = new_pdt; in drm_dp_port_set_pdt()
2104 port->mcs = new_mcs; in drm_dp_port_set_pdt()
2106 if (port->pdt != DP_PEER_DEVICE_NONE) { in drm_dp_port_set_pdt()
2107 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_port_set_pdt()
2108 /* add i2c over sideband */ in drm_dp_port_set_pdt()
2114 ret = -ENOMEM; in drm_dp_port_set_pdt()
2115 drm_err(mgr->dev, "Failed to create MSTB for port %p", port); in drm_dp_port_set_pdt()
2119 mutex_lock(&mgr->lock); in drm_dp_port_set_pdt()
2120 port->mstb = mstb; in drm_dp_port_set_pdt()
2121 mstb->mgr = port->mgr; in drm_dp_port_set_pdt()
2122 mstb->port_parent = port; in drm_dp_port_set_pdt()
2129 mutex_unlock(&mgr->lock); in drm_dp_port_set_pdt()
2138 port->pdt = DP_PEER_DEVICE_NONE; in drm_dp_port_set_pdt()
2143 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2161 return drm_dp_send_dpcd_read(port->mgr, port, in drm_dp_mst_dpcd_read()
2166 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2184 return drm_dp_send_dpcd_write(port->mgr, port, in drm_dp_mst_dpcd_write()
2192 guid_copy(&mstb->guid, guid); in drm_dp_check_mstb_guid()
2194 if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) { in drm_dp_check_mstb_guid()
2197 export_guid(buf, &mstb->guid); in drm_dp_check_mstb_guid()
2199 if (mstb->port_parent) { in drm_dp_check_mstb_guid()
2200 ret = drm_dp_send_dpcd_write(mstb->mgr, in drm_dp_check_mstb_guid()
2201 mstb->port_parent, in drm_dp_check_mstb_guid()
2204 ret = drm_dp_dpcd_write(mstb->mgr->aux, in drm_dp_check_mstb_guid()
2210 return -EPROTO; in drm_dp_check_mstb_guid()
2223 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); in build_mst_prop_path()
2224 for (i = 0; i < (mstb->lct - 1); i++) { in build_mst_prop_path()
2226 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; in build_mst_prop_path()
2228 snprintf(temp, sizeof(temp), "-%d", port_num); in build_mst_prop_path()
2231 snprintf(temp, sizeof(temp), "-%d", pnum); in build_mst_prop_path()
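
build_mst_prop_path() renders the same RAD as the connector's path property. Continuing the hypothetical RAD example:

/*
 * mstb->lct = 3, mstb->rad[0] = 0x18, pnum = 2 on a manager with
 * conn_base_id 40 yields the proppath "mst:40-1-8-2" -- the base id,
 * then one "-<port>" per hop, then the port itself.
 */
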
2236 * drm_dp_mst_connector_late_register() - Late MST connector registration
2249 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n", in drm_dp_mst_connector_late_register()
2250 port->aux.name, connector->kdev->kobj.name); in drm_dp_mst_connector_late_register()
2252 port->aux.dev = connector->kdev; in drm_dp_mst_connector_late_register()
2253 return drm_dp_aux_register_devnode(&port->aux); in drm_dp_mst_connector_late_register()
2258 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2269 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n", in drm_dp_mst_connector_early_unregister()
2270 port->aux.name, connector->kdev->kobj.name); in drm_dp_mst_connector_early_unregister()
2271 drm_dp_aux_unregister_devnode(&port->aux); in drm_dp_mst_connector_early_unregister()
2279 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_port_add_connector()
2283 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); in drm_dp_mst_port_add_connector()
2284 port->connector = mgr->cbs->add_connector(mgr, port, proppath); in drm_dp_mst_port_add_connector()
2285 if (!port->connector) { in drm_dp_mst_port_add_connector()
2286 ret = -ENOMEM; in drm_dp_mst_port_add_connector()
2290 if (port->pdt != DP_PEER_DEVICE_NONE && in drm_dp_mst_port_add_connector()
2291 drm_dp_mst_is_end_device(port->pdt, port->mcs) && in drm_dp_mst_port_add_connector()
2293 port->cached_edid = drm_edid_read_ddc(port->connector, in drm_dp_mst_port_add_connector()
2294 &port->aux.ddc); in drm_dp_mst_port_add_connector()
2296 drm_connector_dynamic_register(port->connector); in drm_dp_mst_port_add_connector()
2300 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret); in drm_dp_mst_port_add_connector()
2304 * Drop a topology reference, and unlink the port from the in-memory topology
2311 mutex_lock(&mgr->lock); in drm_dp_mst_topology_unlink_port()
2312 port->parent->num_ports--; in drm_dp_mst_topology_unlink_port()
2313 list_del(&port->next); in drm_dp_mst_topology_unlink_port()
2314 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_unlink_port()
2328 kref_init(&port->topology_kref); in drm_dp_mst_add_port()
2329 kref_init(&port->malloc_kref); in drm_dp_mst_add_port()
2330 port->parent = mstb; in drm_dp_mst_add_port()
2331 port->port_num = port_number; in drm_dp_mst_add_port()
2332 port->mgr = mgr; in drm_dp_mst_add_port()
2333 port->aux.name = "DPMST"; in drm_dp_mst_add_port()
2334 port->aux.dev = dev->dev; in drm_dp_mst_add_port()
2335 port->aux.is_remote = true; in drm_dp_mst_add_port()
2338 port->aux.drm_dev = dev; in drm_dp_mst_add_port()
2339 drm_dp_remote_aux_init(&port->aux); in drm_dp_mst_add_port()
2355 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_handle_link_address_port()
2362 port = drm_dp_get_port(mstb, port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2365 port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2367 return -ENOMEM; in drm_dp_mst_handle_link_address_port()
2370 } else if (!port->input && port_msg->input_port && port->connector) { in drm_dp_mst_handle_link_address_port()
2371 /* Since port->connector can't be changed here, we create a in drm_dp_mst_handle_link_address_port()
2377 port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2379 return -ENOMEM; in drm_dp_mst_handle_link_address_port()
2382 } else if (port->input && !port_msg->input_port) { in drm_dp_mst_handle_link_address_port()
2384 } else if (port->connector) { in drm_dp_mst_handle_link_address_port()
2388 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_mst_handle_link_address_port()
2390 changed = port->ddps != port_msg->ddps || in drm_dp_mst_handle_link_address_port()
2391 (port->ddps && in drm_dp_mst_handle_link_address_port()
2392 (port->ldps != port_msg->legacy_device_plug_status || in drm_dp_mst_handle_link_address_port()
2393 port->dpcd_rev != port_msg->dpcd_revision || in drm_dp_mst_handle_link_address_port()
2394 port->mcs != port_msg->mcs || in drm_dp_mst_handle_link_address_port()
2395 port->pdt != port_msg->peer_device_type || in drm_dp_mst_handle_link_address_port()
2396 port->num_sdp_stream_sinks != in drm_dp_mst_handle_link_address_port()
2397 port_msg->num_sdp_stream_sinks)); in drm_dp_mst_handle_link_address_port()
2400 port->input = port_msg->input_port; in drm_dp_mst_handle_link_address_port()
2401 if (!port->input) in drm_dp_mst_handle_link_address_port()
2402 new_pdt = port_msg->peer_device_type; in drm_dp_mst_handle_link_address_port()
2403 new_mcs = port_msg->mcs; in drm_dp_mst_handle_link_address_port()
2404 port->ddps = port_msg->ddps; in drm_dp_mst_handle_link_address_port()
2405 port->ldps = port_msg->legacy_device_plug_status; in drm_dp_mst_handle_link_address_port()
2406 port->dpcd_rev = port_msg->dpcd_revision; in drm_dp_mst_handle_link_address_port()
2407 port->num_sdp_streams = port_msg->num_sdp_streams; in drm_dp_mst_handle_link_address_port()
2408 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; in drm_dp_mst_handle_link_address_port()
2410 /* manage mstb port lists with mgr lock - take a reference in drm_dp_mst_handle_link_address_port()
2413 mutex_lock(&mgr->lock); in drm_dp_mst_handle_link_address_port()
2415 list_add(&port->next, &mstb->ports); in drm_dp_mst_handle_link_address_port()
2416 mstb->num_ports++; in drm_dp_mst_handle_link_address_port()
2417 mutex_unlock(&mgr->lock); in drm_dp_mst_handle_link_address_port()
2421 * Reprobe PBN caps on both hotplug, and when re-probing the link in drm_dp_mst_handle_link_address_port()
2424 if (port->ddps && !port->input) { in drm_dp_mst_handle_link_address_port()
2430 port->full_pbn = 0; in drm_dp_mst_handle_link_address_port()
2446 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && in drm_dp_mst_handle_link_address_port()
2447 port->mcs) in drm_dp_mst_handle_link_address_port()
2450 if (port->connector) in drm_dp_mst_handle_link_address_port()
2451 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_link_address_port()
2452 else if (!port->input) in drm_dp_mst_handle_link_address_port()
2455 if (send_link_addr && port->mstb) { in drm_dp_mst_handle_link_address_port()
2456 ret = drm_dp_send_link_address(mgr, port->mstb); in drm_dp_mst_handle_link_address_port()
2469 if (port->connector) in drm_dp_mst_handle_link_address_port()
2470 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_link_address_port()
2480 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_handle_conn_stat()
2487 port = drm_dp_get_port(mstb, conn_stat->port_number); in drm_dp_mst_handle_conn_stat()
2491 if (port->connector) { in drm_dp_mst_handle_conn_stat()
2492 if (!port->input && conn_stat->input_port) { in drm_dp_mst_handle_conn_stat()
2499 mstb->link_address_sent = false; in drm_dp_mst_handle_conn_stat()
2505 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_mst_handle_conn_stat()
2506 } else if (port->input && !conn_stat->input_port) { in drm_dp_mst_handle_conn_stat()
2509 mstb->link_address_sent = false; in drm_dp_mst_handle_conn_stat()
2513 old_ddps = port->ddps; in drm_dp_mst_handle_conn_stat()
2514 port->input = conn_stat->input_port; in drm_dp_mst_handle_conn_stat()
2515 port->ldps = conn_stat->legacy_device_plug_status; in drm_dp_mst_handle_conn_stat()
2516 port->ddps = conn_stat->displayport_device_plug_status; in drm_dp_mst_handle_conn_stat()
2518 if (old_ddps != port->ddps) { in drm_dp_mst_handle_conn_stat()
2519 if (port->ddps && !port->input) in drm_dp_mst_handle_conn_stat()
2522 port->full_pbn = 0; in drm_dp_mst_handle_conn_stat()
2525 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; in drm_dp_mst_handle_conn_stat()
2526 new_mcs = conn_stat->message_capability_status; in drm_dp_mst_handle_conn_stat()
2531 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret); in drm_dp_mst_handle_conn_stat()
2535 if (port->connector) in drm_dp_mst_handle_conn_stat()
2536 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_conn_stat()
2553 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device()
2554 mstb = mgr->mst_primary; in drm_dp_get_mst_branch_device()
2562 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_get_mst_branch_device()
2563 if (port->port_num == port_num) { in drm_dp_get_mst_branch_device()
2564 mstb = port->mstb; in drm_dp_get_mst_branch_device()
2566 drm_err(mgr->dev, in drm_dp_get_mst_branch_device()
2580 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device()
2594 if (guid_equal(&mstb->guid, guid)) in get_mst_branch_device_by_guid_helper()
2597 list_for_each_entry(port, &mstb->ports, next) { in get_mst_branch_device_by_guid_helper()
2598 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); in get_mst_branch_device_by_guid_helper()
2615 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
2617 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); in drm_dp_get_mst_branch_device_by_guid()
2624 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
2635 if (!mstb->link_address_sent) { in drm_dp_check_and_send_link_address()
2643 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_check_and_send_link_address()
2644 if (port->input || !port->ddps || !port->mstb) in drm_dp_check_and_send_link_address()
2647 ret = drm_dp_check_and_send_link_address(mgr, port->mstb); in drm_dp_check_and_send_link_address()
2661 struct drm_device *dev = mgr->dev; in drm_dp_mst_link_probe_work()
2666 mutex_lock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2668 mutex_lock(&mgr->lock); in drm_dp_mst_link_probe_work()
2669 clear_payload_id_table = !mgr->payload_id_table_cleared; in drm_dp_mst_link_probe_work()
2670 mgr->payload_id_table_cleared = true; in drm_dp_mst_link_probe_work()
2672 mstb = mgr->mst_primary; in drm_dp_mst_link_probe_work()
2678 mutex_unlock(&mgr->lock); in drm_dp_mst_link_probe_work()
2680 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2688 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C in drm_dp_mst_link_probe_work()
2700 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2707 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_queue_probe_work()
2745 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); in drm_dp_send_sideband_msg()
2747 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, in drm_dp_send_sideband_msg()
2751 if (ret == -EIO && retries < 5) { in drm_dp_send_sideband_msg()
2755 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); in drm_dp_send_sideband_msg()
2757 return -EIO; in drm_dp_send_sideband_msg()
2760 total -= tosend; in drm_dp_send_sideband_msg()
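
Sideband chunks reach the branch device through ordinary DPCD writes of at most min3(mgr->max_dpcd_transaction_bytes, 16, total) bytes at regbase + offset. A hedged worked example:

/*
 * A 45-byte chunk with max_dpcd_transaction_bytes = 16 goes out as
 * three DPCD writes of 16, 16 and 13 bytes; each write is retried up
 * to five times on -EIO before the whole send is abandoned.
 */
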
2768 struct drm_dp_mst_branch *mstb = txmsg->dst; in set_hdr_from_dst_qlock()
2771 req_type = txmsg->msg[0] & 0x7f; in set_hdr_from_dst_qlock()
2775 hdr->broadcast = 1; in set_hdr_from_dst_qlock()
2777 hdr->broadcast = 0; in set_hdr_from_dst_qlock()
2778 hdr->path_msg = txmsg->path_msg; in set_hdr_from_dst_qlock()
2779 if (hdr->broadcast) { in set_hdr_from_dst_qlock()
2780 hdr->lct = 1; in set_hdr_from_dst_qlock()
2781 hdr->lcr = 6; in set_hdr_from_dst_qlock()
2783 hdr->lct = mstb->lct; in set_hdr_from_dst_qlock()
2784 hdr->lcr = mstb->lct - 1; in set_hdr_from_dst_qlock()
2787 memcpy(hdr->rad, mstb->rad, hdr->lct / 2); in set_hdr_from_dst_qlock()
2803 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in process_single_tx_qlock()
2808 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) in process_single_tx_qlock()
2809 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; in process_single_tx_qlock()
2817 len = txmsg->cur_len - txmsg->cur_offset; in process_single_tx_qlock()
2819 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ in process_single_tx_qlock()
2820 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); in process_single_tx_qlock()
2823 if (len == txmsg->cur_len) in process_single_tx_qlock()
2831 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); in process_single_tx_qlock()
2839 struct drm_printer p = drm_dbg_printer(mgr->dev, in process_single_tx_qlock()
2849 txmsg->cur_offset += tosend; in process_single_tx_qlock()
2850 if (txmsg->cur_offset == txmsg->cur_len) { in process_single_tx_qlock()
2851 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; in process_single_tx_qlock()
2862 WARN_ON(!mutex_is_locked(&mgr->qlock)); in process_single_down_tx_qlock()
2865 if (list_empty(&mgr->tx_msg_downq)) in process_single_down_tx_qlock()
2868 txmsg = list_first_entry(&mgr->tx_msg_downq, in process_single_down_tx_qlock()
2872 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret); in process_single_down_tx_qlock()
2873 list_del(&txmsg->next); in process_single_down_tx_qlock()
2874 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in process_single_down_tx_qlock()
2875 wake_up_all(&mgr->tx_waitq); in process_single_down_tx_qlock()
2882 mutex_lock(&mgr->qlock); in drm_dp_queue_down_tx()
2883 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); in drm_dp_queue_down_tx()
2886 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP, in drm_dp_queue_down_tx()
2892 if (list_is_singular(&mgr->tx_msg_downq)) in drm_dp_queue_down_tx()
2894 mutex_unlock(&mgr->qlock); in drm_dp_queue_down_tx()
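For orientation, here is a condensed sketch of the request/reply pattern every down-request sender in this file follows (drm_dp_send_link_address(), drm_dp_send_dpcd_read() and friends): allocate a txmsg, build the body, queue it, then sleep until the reply or a timeout arrives. This mirrors the in-file code rather than adding any new API; error handling is trimmed.

static int example_send_down_req(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_branch *mstb,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	/* Fill the request body and aim it at the target branch device... */
	build_dpcd_read(txmsg, port->port_num, offset, size);
	txmsg->dst = mstb;

	/* ...queue it, which also kicks the tx worker... */
	drm_dp_queue_down_tx(mgr, txmsg);

	/* ...and block until the down reply arrives or times out. */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
		ret = -EIO;

	kfree(txmsg);
	return ret;
}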
2904 for (i = 0; i < reply->nports; i++) { in drm_dp_dump_link_address()
2905 port_reply = &reply->ports[i]; in drm_dp_dump_link_address()
2906 drm_dbg_kms(mgr->dev, in drm_dp_dump_link_address()
2909 port_reply->input_port, in drm_dp_dump_link_address()
2910 port_reply->peer_device_type, in drm_dp_dump_link_address()
2911 port_reply->port_number, in drm_dp_dump_link_address()
2912 port_reply->dpcd_revision, in drm_dp_dump_link_address()
2913 port_reply->mcs, in drm_dp_dump_link_address()
2914 port_reply->ddps, in drm_dp_dump_link_address()
2915 port_reply->legacy_device_plug_status, in drm_dp_dump_link_address()
2916 port_reply->num_sdp_streams, in drm_dp_dump_link_address()
2917 port_reply->num_sdp_stream_sinks); in drm_dp_dump_link_address()
2932 return -ENOMEM; in drm_dp_send_link_address()
2934 txmsg->dst = mstb; in drm_dp_send_link_address()
2937 mstb->link_address_sent = true; in drm_dp_send_link_address()
2943 drm_err(mgr->dev, "Sending link address failed with %d\n", ret); in drm_dp_send_link_address()
2946 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_link_address()
2947 drm_err(mgr->dev, "link address NAK received\n"); in drm_dp_send_link_address()
2948 ret = -EIO; in drm_dp_send_link_address()
2952 reply = &txmsg->reply.u.link_addr; in drm_dp_send_link_address()
2953 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); in drm_dp_send_link_address()
2956 ret = drm_dp_check_mstb_guid(mstb, &reply->guid); in drm_dp_send_link_address()
2960 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf)); in drm_dp_send_link_address()
2961 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret); in drm_dp_send_link_address()
2965 for (i = 0; i < reply->nports; i++) { in drm_dp_send_link_address()
2966 port_mask |= BIT(reply->ports[i].port_number); in drm_dp_send_link_address()
2967 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, in drm_dp_send_link_address()
2968 &reply->ports[i]); in drm_dp_send_link_address()
2975 /* Prune any ports that are currently a part of mstb in our in-memory in drm_dp_send_link_address()
2976 * topology, but were not seen in this link address. Usually this in drm_dp_send_link_address()
2977 * means that they were removed while the topology was out of sync, in drm_dp_send_link_address()
2980 mutex_lock(&mgr->lock); in drm_dp_send_link_address()
2981 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { in drm_dp_send_link_address()
2982 if (port_mask & BIT(port->port_num)) in drm_dp_send_link_address()
2985 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n", in drm_dp_send_link_address()
2986 port->port_num); in drm_dp_send_link_address()
2987 list_del(&port->next); in drm_dp_send_link_address()
2991 mutex_unlock(&mgr->lock); in drm_dp_send_link_address()
2995 mstb->link_address_sent = false; in drm_dp_send_link_address()
3011 txmsg->dst = mstb; in drm_dp_send_clear_payload_id_table()
3017 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_clear_payload_id_table()
3018 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n"); in drm_dp_send_clear_payload_id_table()
3034 return -ENOMEM; in drm_dp_send_enum_path_resources()
3036 txmsg->dst = mstb; in drm_dp_send_enum_path_resources()
3037 build_enum_path_resources(txmsg, port->port_num); in drm_dp_send_enum_path_resources()
3044 path_res = &txmsg->reply.u.path_resources; in drm_dp_send_enum_path_resources()
3046 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_enum_path_resources()
3047 drm_dbg_kms(mgr->dev, "enum path resources nak received\n"); in drm_dp_send_enum_path_resources()
3049 if (port->port_num != path_res->port_number) in drm_dp_send_enum_path_resources()
3052 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n", in drm_dp_send_enum_path_resources()
3053 path_res->port_number, in drm_dp_send_enum_path_resources()
3054 path_res->full_payload_bw_number, in drm_dp_send_enum_path_resources()
3055 path_res->avail_payload_bw_number); in drm_dp_send_enum_path_resources()
3061 if (port->full_pbn != path_res->full_payload_bw_number || in drm_dp_send_enum_path_resources()
3062 port->fec_capable != path_res->fec_capable) in drm_dp_send_enum_path_resources()
3065 port->full_pbn = path_res->full_payload_bw_number; in drm_dp_send_enum_path_resources()
3066 port->fec_capable = path_res->fec_capable; in drm_dp_send_enum_path_resources()
3076 if (!mstb->port_parent) in drm_dp_get_last_connected_port_to_mstb()
3079 if (mstb->port_parent->mstb != mstb) in drm_dp_get_last_connected_port_to_mstb()
3080 return mstb->port_parent; in drm_dp_get_last_connected_port_to_mstb()
3082 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); in drm_dp_get_last_connected_port_to_mstb()
3086 * Searches upwards in the topology starting from mstb to try to find the
3088 * topology. This can be used in order to perform operations like releasing
3101 mutex_lock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
3102 if (!mgr->mst_primary) in drm_dp_get_last_connected_port_and_mstb()
3110 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { in drm_dp_get_last_connected_port_and_mstb()
3111 rmstb = found_port->parent; in drm_dp_get_last_connected_port_and_mstb()
3112 *port_num = found_port->port_num; in drm_dp_get_last_connected_port_and_mstb()
3115 mstb = found_port->parent; in drm_dp_get_last_connected_port_and_mstb()
3119 mutex_unlock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
3134 port_num = port->port_num; in drm_dp_payload_send_msg()
3135 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_payload_send_msg()
3138 port->parent, in drm_dp_payload_send_msg()
3142 return -EINVAL; in drm_dp_payload_send_msg()
3147 ret = -ENOMEM; in drm_dp_payload_send_msg()
3151 for (i = 0; i < port->num_sdp_streams; i++) in drm_dp_payload_send_msg()
3154 txmsg->dst = mstb; in drm_dp_payload_send_msg()
3157 pbn, port->num_sdp_streams, sinks); in drm_dp_payload_send_msg()
3164 * mstb could also be removed from the topology. In the future, this in drm_dp_payload_send_msg()
3167 * timeout if the topology is still connected to the system. in drm_dp_payload_send_msg()
3171 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_payload_send_msg()
3172 ret = -EINVAL; in drm_dp_payload_send_msg()
3190 return -EINVAL; in drm_dp_send_power_updown_phy()
3195 return -ENOMEM; in drm_dp_send_power_updown_phy()
3198 txmsg->dst = port->parent; in drm_dp_send_power_updown_phy()
3199 build_power_updown_phy(txmsg, port->port_num, power_up); in drm_dp_send_power_updown_phy()
3202 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); in drm_dp_send_power_updown_phy()
3204 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_power_updown_phy()
3205 ret = -EINVAL; in drm_dp_send_power_updown_phy()
3228 return -ENOMEM; in drm_dp_send_query_stream_enc_status()
3232 ret = -EINVAL; in drm_dp_send_query_stream_enc_status()
3238 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_send_query_stream_enc_status()
3239 state = to_drm_dp_mst_topology_state(mgr->base.state); in drm_dp_send_query_stream_enc_status()
3247 txmsg->dst = mgr->mst_primary; in drm_dp_send_query_stream_enc_status()
3249 build_query_stream_enc_status(txmsg, payload->vcpi, nonce); in drm_dp_send_query_stream_enc_status()
3253 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); in drm_dp_send_query_stream_enc_status()
3256 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_query_stream_enc_status()
3257 drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); in drm_dp_send_query_stream_enc_status()
3258 ret = -ENXIO; in drm_dp_send_query_stream_enc_status()
3263 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status)); in drm_dp_send_query_stream_enc_status()
3266 drm_modeset_unlock(&mgr->base.lock); in drm_dp_send_query_stream_enc_status()
3277 return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, in drm_dp_create_payload_at_dfp()
3278 payload->time_slots); in drm_dp_create_payload_at_dfp()
3285 struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); in drm_dp_create_payload_to_remote()
3288 return -EIO; in drm_dp_create_payload_to_remote()
3290 ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn); in drm_dp_create_payload_to_remote()
3299 drm_dbg_kms(mgr->dev, "\n"); in drm_dp_destroy_payload_at_remote_and_dfp()
3302 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) { in drm_dp_destroy_payload_at_remote_and_dfp()
3303 drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0); in drm_dp_destroy_payload_at_remote_and_dfp()
3304 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP; in drm_dp_destroy_payload_at_remote_and_dfp()
3307 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) in drm_dp_destroy_payload_at_remote_and_dfp()
3308 drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0); in drm_dp_destroy_payload_at_remote_and_dfp()
3312 * drm_dp_add_payload_part1() - Execute payload update part 1
3330 if (mgr->payload_count == 0) in drm_dp_add_payload_part1()
3331 mgr->next_start_slot = mst_state->start_slot; in drm_dp_add_payload_part1()
3333 payload->vc_start_slot = mgr->next_start_slot; in drm_dp_add_payload_part1()
3335 mgr->payload_count++; in drm_dp_add_payload_part1()
3336 mgr->next_start_slot += payload->time_slots; in drm_dp_add_payload_part1()
3338 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL; in drm_dp_add_payload_part1()
3341 port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); in drm_dp_add_payload_part1()
3343 drm_dbg_kms(mgr->dev, in drm_dp_add_payload_part1()
3344 "VCPI %d for port %p not in topology, not creating a payload to remote\n", in drm_dp_add_payload_part1()
3345 payload->vcpi, payload->port); in drm_dp_add_payload_part1()
3346 return -EIO; in drm_dp_add_payload_part1()
3351 drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n", in drm_dp_add_payload_part1()
3352 payload->port, ret); in drm_dp_add_payload_part1()
3356 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP; in drm_dp_add_payload_part1()
3366 * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel
3382 mutex_lock(&mgr->lock); in drm_dp_remove_payload_part1()
3383 send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary); in drm_dp_remove_payload_part1()
3384 mutex_unlock(&mgr->lock); in drm_dp_remove_payload_part1()
3389 drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n", in drm_dp_remove_payload_part1()
3390 payload->vcpi); in drm_dp_remove_payload_part1()
3392 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL; in drm_dp_remove_payload_part1()
3397 * drm_dp_remove_payload_part2() - Remove an MST payload locally
3416 list_for_each_entry(pos, &mst_state->payloads, next) { in drm_dp_remove_payload_part2()
3417 if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot) in drm_dp_remove_payload_part2()
3418 pos->vc_start_slot -= old_payload->time_slots; in drm_dp_remove_payload_part2()
3420 new_payload->vc_start_slot = -1; in drm_dp_remove_payload_part2()
3422 mgr->payload_count--; in drm_dp_remove_payload_part2()
3423 mgr->next_start_slot -= old_payload->time_slots; in drm_dp_remove_payload_part2()
3425 if (new_payload->delete) in drm_dp_remove_payload_part2()
3426 drm_dp_mst_put_port_malloc(new_payload->port); in drm_dp_remove_payload_part2()
3428 new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE; in drm_dp_remove_payload_part2()
3432 * drm_dp_add_payload_part2() - Execute payload update part 2
3447 if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) { in drm_dp_add_payload_part2()
3448 drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", in drm_dp_add_payload_part2()
3449 payload->port->connector->name); in drm_dp_add_payload_part2()
3450 return -EIO; in drm_dp_add_payload_part2()
3456 drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n", in drm_dp_add_payload_part2()
3457 payload->port, ret); in drm_dp_add_payload_part2()
3459 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE; in drm_dp_add_payload_part2()
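Putting the two parts together, a minimal sketch of the payload lifecycle from a driver's commit path, assuming the recent two-argument drm_dp_add_payload_part2() (older kernels also took the topology state). The stream-enable step and the error handling in between are elided.

static void example_enable_stream(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(state, mgr);
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, port);

	/* Part 1: reserve the start slot and program the DFP payload table. */
	if (drm_dp_add_payload_part1(mgr, mst_state, payload))
		return;

	/* ...the driver enables the stream and waits for ACT here... */

	/* Part 2: send ALLOCATE_PAYLOAD so downstream branches follow suit. */
	drm_dp_add_payload_part2(mgr, payload);
}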
3473 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_send_dpcd_read()
3475 return -EINVAL; in drm_dp_send_dpcd_read()
3479 ret = -ENOMEM; in drm_dp_send_dpcd_read()
3483 build_dpcd_read(txmsg, port->port_num, offset, size); in drm_dp_send_dpcd_read()
3484 txmsg->dst = port->parent; in drm_dp_send_dpcd_read()
3492 if (txmsg->reply.reply_type == 1) { in drm_dp_send_dpcd_read()
3493 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", in drm_dp_send_dpcd_read()
3494 mstb, port->port_num, offset, size); in drm_dp_send_dpcd_read()
3495 ret = -EIO; in drm_dp_send_dpcd_read()
3499 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { in drm_dp_send_dpcd_read()
3500 ret = -EPROTO; in drm_dp_send_dpcd_read()
3504 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, in drm_dp_send_dpcd_read()
3506 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); in drm_dp_send_dpcd_read()
3524 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_send_dpcd_write()
3526 return -EINVAL; in drm_dp_send_dpcd_write()
3530 ret = -ENOMEM; in drm_dp_send_dpcd_write()
3534 build_dpcd_write(txmsg, port->port_num, offset, size, bytes); in drm_dp_send_dpcd_write()
3535 txmsg->dst = mstb; in drm_dp_send_dpcd_write()
3541 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_dpcd_write()
3542 ret = -EIO; in drm_dp_send_dpcd_write()
3571 return -ENOMEM; in drm_dp_send_up_ack_reply()
3573 txmsg->dst = mstb; in drm_dp_send_up_ack_reply()
3576 mutex_lock(&mgr->qlock); in drm_dp_send_up_ack_reply()
3579 mutex_unlock(&mgr->qlock); in drm_dp_send_up_ack_reply()
3586 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
3613 * drm_dp_read_mst_cap() - Read the sink's MST mode capability
3641 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3643 * @mst_state: true to enable MST on this connector - false to disable.
3653 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
3654 if (mst_state == mgr->mst_state) in drm_dp_mst_topology_mgr_set_mst()
3657 mgr->mst_state = mst_state; in drm_dp_mst_topology_mgr_set_mst()
3660 WARN_ON(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
3663 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); in drm_dp_mst_topology_mgr_set_mst()
3665 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", in drm_dp_mst_topology_mgr_set_mst()
3666 mgr->aux->name, ret); in drm_dp_mst_topology_mgr_set_mst()
3673 ret = -ENOMEM; in drm_dp_mst_topology_mgr_set_mst()
3676 mstb->mgr = mgr; in drm_dp_mst_topology_mgr_set_mst()
3679 mgr->mst_primary = mstb; in drm_dp_mst_topology_mgr_set_mst()
3680 drm_dp_mst_topology_get_mstb(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
3682 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_set_mst()
3690 drm_dp_dpcd_clear_payload(mgr->aux); in drm_dp_mst_topology_mgr_set_mst()
3697 mstb = mgr->mst_primary; in drm_dp_mst_topology_mgr_set_mst()
3698 mgr->mst_primary = NULL; in drm_dp_mst_topology_mgr_set_mst()
3700 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); in drm_dp_mst_topology_mgr_set_mst()
3702 mgr->payload_id_table_cleared = false; in drm_dp_mst_topology_mgr_set_mst()
3704 mgr->reset_rx_state = true; in drm_dp_mst_topology_mgr_set_mst()
3708 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
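A minimal detection sketch showing how a driver typically pairs drm_dp_read_mst_cap() with this function, assuming the enum-returning form of drm_dp_read_mst_cap() found in recent kernels (older ones return a bool).

static int example_detect_mst(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_aux *aux)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	int ret;

	ret = drm_dp_read_dpcd_caps(aux, dpcd);
	if (ret < 0)
		return ret;

	/* Enabling writes DP_MSTM_CTRL and queues the initial probe work;
	 * disabling tears the in-memory topology down. */
	return drm_dp_mst_topology_mgr_set_mst(mgr,
			drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST);
}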
3721 /* The link address will need to be re-sent on resume */ in drm_dp_mst_topology_mgr_invalidate_mstb()
3722 mstb->link_address_sent = false; in drm_dp_mst_topology_mgr_invalidate_mstb()
3724 list_for_each_entry(port, &mstb->ports, next) in drm_dp_mst_topology_mgr_invalidate_mstb()
3725 if (port->mstb) in drm_dp_mst_topology_mgr_invalidate_mstb()
3726 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); in drm_dp_mst_topology_mgr_invalidate_mstb()
3730 * drm_dp_mst_topology_queue_probe - Queue a topology probe
3733 * Queue a work to probe the MST topology. Drivers should call this only to
3734 * sync the topology's HW->SW state after the MST link's parameters have
3735 * changed in a way the state could've become out-of-sync. This is the case
3737 * branch device has switched between UHBR and non-UHBR rates. Except for those
3738 * cases - for instance when a sink gets plugged/unplugged to a port - the SW
3743 mutex_lock(&mgr->lock); in drm_dp_mst_topology_queue_probe()
3745 if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary)) in drm_dp_mst_topology_queue_probe()
3748 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); in drm_dp_mst_topology_queue_probe()
3752 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_queue_probe()
3757 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3765 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3766 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_suspend()
3768 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3769 flush_work(&mgr->up_req_work); in drm_dp_mst_topology_mgr_suspend()
3770 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_suspend()
3771 flush_work(&mgr->delayed_destroy_work); in drm_dp_mst_topology_mgr_suspend()
3773 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3774 if (mgr->mst_state && mgr->mst_primary) in drm_dp_mst_topology_mgr_suspend()
3775 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); in drm_dp_mst_topology_mgr_suspend()
3776 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3781 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3783 * @sync: whether or not to perform topology reprobing synchronously
3788 * If the device fails this returns -1, and the driver should do
3797 * Returns: -1 if the MST topology was removed while we were suspended, 0
3807 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3808 if (!mgr->mst_primary) in drm_dp_mst_topology_mgr_resume()
3811 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { in drm_dp_mst_topology_mgr_resume()
3812 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3816 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_resume()
3821 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3826 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); in drm_dp_mst_topology_mgr_resume()
3828 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3834 ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid); in drm_dp_mst_topology_mgr_resume()
3836 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3841 * For the final step of resuming the topology, we need to bring the in drm_dp_mst_topology_mgr_resume()
3842 * state of our in-memory topology back into sync with reality. So, in drm_dp_mst_topology_mgr_resume()
3846 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3849 drm_dbg_kms(mgr->dev, in drm_dp_mst_topology_mgr_resume()
3850 "Waiting for link probe work to finish re-syncing topology...\n"); in drm_dp_mst_topology_mgr_resume()
3851 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_resume()
3857 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3858 return -1; in drm_dp_mst_topology_mgr_resume()
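A minimal resume-side sketch of how a driver consumes that return value; synchronous reprobing is chosen here for simplicity.

static void example_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	/* -1 means the topology vanished while we were suspended. */
	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
		/* Fall back to SST and let a normal hotplug reprobe run. */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}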
3878 up ? &mgr->up_req_recv : &mgr->down_rep_recv; in drm_dp_get_one_sb_msg()
3885 len = min(mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
3886 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); in drm_dp_get_one_sb_msg()
3888 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); in drm_dp_get_one_sb_msg()
3896 drm_dbg_kms(mgr->dev, "ERROR: failed header\n"); in drm_dp_get_one_sb_msg()
3904 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct); in drm_dp_get_one_sb_msg()
3910 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]); in drm_dp_get_one_sb_msg()
3914 replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); in drm_dp_get_one_sb_msg()
3917 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]); in drm_dp_get_one_sb_msg()
3921 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; in drm_dp_get_one_sb_msg()
3924 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
3925 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, in drm_dp_get_one_sb_msg()
3928 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", in drm_dp_get_one_sb_msg()
3935 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n"); in drm_dp_get_one_sb_msg()
3940 replylen -= len; in drm_dp_get_one_sb_msg()
3954 const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr; in verify_rx_request_type()
3955 const struct drm_dp_mst_branch *mstb = txmsg->dst; in verify_rx_request_type()
3956 int tx_req_type = get_msg_request_type(txmsg->msg[0]); in verify_rx_request_type()
3957 int rx_req_type = get_msg_request_type(rxmsg->msg[0]); in verify_rx_request_type()
3963 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str)); in verify_rx_request_type()
3964 drm_dbg_kms(mgr->dev, in verify_rx_request_type()
3966 mstb, hdr->seqno, mstb->lct, rad_str, in verify_rx_request_type()
3977 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; in drm_dp_mst_handle_down_rep()
3982 /* Multi-packet message transmission, don't clear the reply */ in drm_dp_mst_handle_down_rep()
3983 if (!msg->have_eomt) in drm_dp_mst_handle_down_rep()
3987 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
3989 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, in drm_dp_mst_handle_down_rep()
3993 if (!txmsg || txmsg->dst != mstb) { in drm_dp_mst_handle_down_rep()
3996 hdr = &msg->initial_hdr; in drm_dp_mst_handle_down_rep()
3997 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", in drm_dp_mst_handle_down_rep()
3998 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); in drm_dp_mst_handle_down_rep()
4000 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4006 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4011 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); in drm_dp_mst_handle_down_rep()
4013 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_mst_handle_down_rep()
4014 drm_dbg_kms(mgr->dev, in drm_dp_mst_handle_down_rep()
4016 txmsg->reply.req_type, in drm_dp_mst_handle_down_rep()
4017 drm_dp_mst_req_type_str(txmsg->reply.req_type), in drm_dp_mst_handle_down_rep()
4018 txmsg->reply.u.nak.reason, in drm_dp_mst_handle_down_rep()
4019 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), in drm_dp_mst_handle_down_rep()
4020 txmsg->reply.u.nak.nak_data); in drm_dp_mst_handle_down_rep()
4023 txmsg->state = DRM_DP_SIDEBAND_TX_RX; in drm_dp_mst_handle_down_rep()
4024 list_del(&txmsg->next); in drm_dp_mst_handle_down_rep()
4026 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
4028 wake_up_all(&mgr->tx_waitq); in drm_dp_mst_handle_down_rep()
4043 mutex_lock(&mgr->lock); in primary_mstb_probing_is_done()
4045 if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) { in primary_mstb_probing_is_done()
4046 probing_done = mgr->mst_primary->link_address_sent; in primary_mstb_probing_is_done()
4047 drm_dp_mst_topology_put_mstb(mgr->mst_primary); in primary_mstb_probing_is_done()
4050 mutex_unlock(&mgr->lock); in primary_mstb_probing_is_done()
4060 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; in drm_dp_mst_process_up_req()
4061 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; in drm_dp_mst_process_up_req()
4064 if (hdr->broadcast) { in drm_dp_mst_process_up_req()
4067 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) in drm_dp_mst_process_up_req()
4068 guid = &msg->u.conn_stat.guid; in drm_dp_mst_process_up_req()
4069 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) in drm_dp_mst_process_up_req()
4070 guid = &msg->u.resource_stat.guid; in drm_dp_mst_process_up_req()
4075 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); in drm_dp_mst_process_up_req()
4079 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct); in drm_dp_mst_process_up_req()
4084 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { in drm_dp_mst_process_up_req()
4086 drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n"); in drm_dp_mst_process_up_req()
4088 dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); in drm_dp_mst_process_up_req()
4096 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_process_up_req()
4108 mutex_lock(&mgr->probe_lock); in drm_dp_mst_up_req_work()
4110 mutex_lock(&mgr->up_req_lock); in drm_dp_mst_up_req_work()
4111 up_req = list_first_entry_or_null(&mgr->up_req_list, in drm_dp_mst_up_req_work()
4115 list_del(&up_req->next); in drm_dp_mst_up_req_work()
4116 mutex_unlock(&mgr->up_req_lock); in drm_dp_mst_up_req_work()
4124 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_up_req_work()
4127 drm_kms_helper_hotplug_event(mgr->dev); in drm_dp_mst_up_req_work()
4139 if (!mgr->up_req_recv.have_eomt) in drm_dp_mst_handle_up_req()
4144 ret = -ENOMEM; in drm_dp_mst_handle_up_req()
4148 INIT_LIST_HEAD(&up_req->next); in drm_dp_mst_handle_up_req()
4150 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg); in drm_dp_mst_handle_up_req()
4152 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && in drm_dp_mst_handle_up_req()
4153 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4154 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", in drm_dp_mst_handle_up_req()
4155 up_req->msg.req_type); in drm_dp_mst_handle_up_req()
4160 mutex_lock(&mgr->lock); in drm_dp_mst_handle_up_req()
4161 mst_primary = mgr->mst_primary; in drm_dp_mst_handle_up_req()
4163 mutex_unlock(&mgr->lock); in drm_dp_mst_handle_up_req()
4167 mutex_unlock(&mgr->lock); in drm_dp_mst_handle_up_req()
4169 drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, in drm_dp_mst_handle_up_req()
4174 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4176 &up_req->msg.u.conn_stat; in drm_dp_mst_handle_up_req()
4178 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", in drm_dp_mst_handle_up_req()
4179 conn_stat->port_number, in drm_dp_mst_handle_up_req()
4180 conn_stat->legacy_device_plug_status, in drm_dp_mst_handle_up_req()
4181 conn_stat->displayport_device_plug_status, in drm_dp_mst_handle_up_req()
4182 conn_stat->message_capability_status, in drm_dp_mst_handle_up_req()
4183 conn_stat->input_port, in drm_dp_mst_handle_up_req()
4184 conn_stat->peer_device_type); in drm_dp_mst_handle_up_req()
4185 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4187 &up_req->msg.u.resource_stat; in drm_dp_mst_handle_up_req()
4189 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n", in drm_dp_mst_handle_up_req()
4190 res_stat->port_number, in drm_dp_mst_handle_up_req()
4191 res_stat->available_pbn); in drm_dp_mst_handle_up_req()
4194 up_req->hdr = mgr->up_req_recv.initial_hdr; in drm_dp_mst_handle_up_req()
4195 mutex_lock(&mgr->up_req_lock); in drm_dp_mst_handle_up_req()
4196 list_add_tail(&up_req->next, &mgr->up_req_list); in drm_dp_mst_handle_up_req()
4197 mutex_unlock(&mgr->up_req_lock); in drm_dp_mst_handle_up_req()
4198 queue_work(system_long_wq, &mgr->up_req_work); in drm_dp_mst_handle_up_req()
4200 reset_msg_rx_state(&mgr->up_req_recv); in drm_dp_mst_handle_up_req()
4206 mutex_lock(&mgr->lock); in update_msg_rx_state()
4207 if (mgr->reset_rx_state) { in update_msg_rx_state()
4208 mgr->reset_rx_state = false; in update_msg_rx_state()
4209 reset_msg_rx_state(&mgr->down_rep_recv); in update_msg_rx_state()
4210 reset_msg_rx_state(&mgr->up_req_recv); in update_msg_rx_state()
4212 mutex_unlock(&mgr->lock); in update_msg_rx_state()
4216 * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
4224 * topology manager will process the sideband messages received
4244 if (sc != mgr->sink_count) { in drm_dp_mst_hpd_irq_handle_event()
4245 mgr->sink_count = sc; in drm_dp_mst_hpd_irq_handle_event()
4268 * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
4281 mutex_lock(&mgr->qlock); in drm_dp_mst_hpd_irq_send_new_request()
4282 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, in drm_dp_mst_hpd_irq_send_new_request()
4286 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_hpd_irq_send_new_request()
4287 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in drm_dp_mst_hpd_irq_send_new_request()
4289 mutex_unlock(&mgr->qlock); in drm_dp_mst_hpd_irq_send_new_request()
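A sketch of the ESI loop a driver runs from its HPD-IRQ handler around these two helpers. The ESI read size and the single-byte ack write are abbreviated from what real drivers do (they loop, retry, and ack every changed byte); the offsets are the standard DPCD ESI registers.

static void example_handle_mst_irq(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[4] = {}, ack[4] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, sizeof(esi)) < 0)
		return;

	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);

	/* Ack the serviced events (abbreviated to the IRQ vector byte)... */
	if (handled)
		drm_dp_dpcd_writeb(mgr->aux, DP_SINK_COUNT_ESI + 1, ack[1]);

	/* ...then let the next queued down-request go out. */
	drm_dp_mst_hpd_irq_send_new_request(mgr);
}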
4296 * drm_dp_mst_detect_port() - get connection status for an MST port
4317 ret = drm_modeset_lock(&mgr->base.lock, ctx); in drm_dp_mst_detect_port()
4323 if (!port->ddps) in drm_dp_mst_detect_port()
4326 switch (port->pdt) { in drm_dp_mst_detect_port()
4330 if (!port->mcs) in drm_dp_mst_detect_port()
4336 /* for logical ports - cache the EDID */ in drm_dp_mst_detect_port()
4337 if (drm_dp_mst_port_is_logical(port) && !port->cached_edid) in drm_dp_mst_detect_port()
4338 port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc); in drm_dp_mst_detect_port()
4341 if (port->ldps) in drm_dp_mst_detect_port()
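A sketch of the connector ->detect_ctx() hook this helper is meant for; struct example_mst_connector and to_example_mst_connector() are hypothetical driver-side names.

static int example_mst_detect_ctx(struct drm_connector *connector,
				  struct drm_modeset_acquire_ctx *ctx,
				  bool force)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);

	/* Returns a connector_status value; may also return -EDEADLK so the
	 * caller can run the drm_modeset_acquire backoff dance. */
	return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
}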
4352 * drm_dp_mst_edid_read() - get EDID for an MST port
4372 if (port->cached_edid) in drm_dp_mst_edid_read()
4373 drm_edid = drm_edid_dup(port->cached_edid); in drm_dp_mst_edid_read()
4375 drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc); in drm_dp_mst_edid_read()
4384 * drm_dp_mst_get_edid() - get EDID for an MST port
4413 * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
4415 * @mgr: MST topology manager for the port
4423 * atomic state is added whenever the state of payloads in the topology changes.
4453 conn_state = drm_atomic_get_new_connector_state(state, port->connector); in drm_dp_atomic_find_time_slots()
4454 topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc); in drm_dp_atomic_find_time_slots()
4459 prev_slots = payload->time_slots; in drm_dp_atomic_find_time_slots()
4460 prev_bw = payload->pbn; in drm_dp_atomic_find_time_slots()
4467 if (drm_WARN_ON(mgr->dev, payload->delete)) { in drm_dp_atomic_find_time_slots()
4468 drm_err(mgr->dev, in drm_dp_atomic_find_time_slots()
4471 return -EINVAL; in drm_dp_atomic_find_time_slots()
4475 req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full); in drm_dp_atomic_find_time_slots()
4477 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", in drm_dp_atomic_find_time_slots()
4478 port->connector->base.id, port->connector->name, in drm_dp_atomic_find_time_slots()
4480 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", in drm_dp_atomic_find_time_slots()
4481 port->connector->base.id, port->connector->name, in drm_dp_atomic_find_time_slots()
4488 return -ENOMEM; in drm_dp_atomic_find_time_slots()
4491 payload->port = port; in drm_dp_atomic_find_time_slots()
4492 payload->vc_start_slot = -1; in drm_dp_atomic_find_time_slots()
4493 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE; in drm_dp_atomic_find_time_slots()
4494 list_add(&payload->next, &topology_state->payloads); in drm_dp_atomic_find_time_slots()
4496 payload->time_slots = req_slots; in drm_dp_atomic_find_time_slots()
4497 payload->pbn = pbn; in drm_dp_atomic_find_time_slots()
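A sketch of the encoder atomic_check step this is called from, assuming the bpp_x16 flavour of drm_dp_calc_pbn_mode() in recent kernels (older versions take a whole-number bpp plus a DSC flag).

static int example_mst_encoder_atomic_check(struct drm_atomic_state *state,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port,
					    const struct drm_display_mode *mode)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(mode->clock, 24 << 4); /* 24 bpp, no DSC */
	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	/* The global slot/BW limits are then validated in one pass by
	 * drm_dp_mst_atomic_check() once every port has stated its demand. */
	return drm_dp_mst_atomic_check(state);
}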
4504 * drm_dp_atomic_release_time_slots() - Release allocated time slots
4506 * @mgr: MST topology manager for the port
4515 * topology.
4539 old_conn_state = drm_atomic_get_old_connector_state(state, port->connector); in drm_dp_atomic_release_time_slots()
4540 if (!old_conn_state->crtc) in drm_dp_atomic_release_time_slots()
4544 new_conn_state = drm_atomic_get_new_connector_state(state, port->connector); in drm_dp_atomic_release_time_slots()
4545 if (new_conn_state->crtc) { in drm_dp_atomic_release_time_slots()
4547 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); in drm_dp_atomic_release_time_slots()
4553 if (!crtc_state->mode_changed && !crtc_state->connectors_changed) in drm_dp_atomic_release_time_slots()
4561 topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc); in drm_dp_atomic_release_time_slots()
4567 drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n", in drm_dp_atomic_release_time_slots()
4568 port, &topology_state->base); in drm_dp_atomic_release_time_slots()
4569 return -EINVAL; in drm_dp_atomic_release_time_slots()
4572 if (new_conn_state->crtc) in drm_dp_atomic_release_time_slots()
4575 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots); in drm_dp_atomic_release_time_slots()
4576 if (!payload->delete) { in drm_dp_atomic_release_time_slots()
4577 payload->pbn = 0; in drm_dp_atomic_release_time_slots()
4578 payload->delete = true; in drm_dp_atomic_release_time_slots()
4579 topology_state->payload_mask &= ~BIT(payload->vcpi - 1); in drm_dp_atomic_release_time_slots()
4587 * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
4591 * currently assigned to an MST topology. Drivers must call this hook from their
4606 if (!mst_state->pending_crtc_mask) in drm_dp_mst_atomic_setup_commit()
4609 num_commit_deps = hweight32(mst_state->pending_crtc_mask); in drm_dp_mst_atomic_setup_commit()
4610 mst_state->commit_deps = kmalloc_array(num_commit_deps, in drm_dp_mst_atomic_setup_commit()
4611 sizeof(*mst_state->commit_deps), GFP_KERNEL); in drm_dp_mst_atomic_setup_commit()
4612 if (!mst_state->commit_deps) in drm_dp_mst_atomic_setup_commit()
4613 return -ENOMEM; in drm_dp_mst_atomic_setup_commit()
4614 mst_state->num_commit_deps = num_commit_deps; in drm_dp_mst_atomic_setup_commit()
4618 if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) { in drm_dp_mst_atomic_setup_commit()
4619 mst_state->commit_deps[commit_idx++] = in drm_dp_mst_atomic_setup_commit()
4620 drm_crtc_commit_get(crtc_state->commit); in drm_dp_mst_atomic_setup_commit()
4630 * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
4635 * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
4636 * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
4638 * the modeset objects in these commits share are an MST topology.
4642 * determined at commit-time) from the previous state.
4655 for (j = 0; j < old_mst_state->num_commit_deps; j++) { in drm_dp_mst_atomic_wait_for_dependencies()
4656 ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]); in drm_dp_mst_atomic_wait_for_dependencies()
4658 drm_err(state->dev, "Failed to wait for %s: %d\n", in drm_dp_mst_atomic_wait_for_dependencies()
4659 old_mst_state->commit_deps[j]->crtc->name, ret); in drm_dp_mst_atomic_wait_for_dependencies()
4665 list_for_each_entry(old_payload, &old_mst_state->payloads, next) { in drm_dp_mst_atomic_wait_for_dependencies()
4666 if (old_payload->delete) in drm_dp_mst_atomic_wait_for_dependencies()
4670 old_payload->port); in drm_dp_mst_atomic_wait_for_dependencies()
4671 new_payload->vc_start_slot = old_payload->vc_start_slot; in drm_dp_mst_atomic_wait_for_dependencies()
4672 new_payload->payload_allocation_status = in drm_dp_mst_atomic_wait_for_dependencies()
4673 old_payload->payload_allocation_status; in drm_dp_mst_atomic_wait_for_dependencies()
4680 * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
4683 * @mgr: The MST topology manager for the &drm_connector
4686 * serialize non-blocking commits happening on the real DP connector of an MST topology switching
4687 * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
4688 * MST topology will never share the same &drm_encoder.
4691 * state to determine if it is about to have a modeset - and then pulling in the MST topology state
4704 struct drm_atomic_state *state = new_conn_state->state; in drm_dp_mst_root_conn_atomic_check()
4706 drm_atomic_get_old_connector_state(state, new_conn_state->connector); in drm_dp_mst_root_conn_atomic_check()
4710 if (new_conn_state->crtc) { in drm_dp_mst_root_conn_atomic_check()
4711 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4717 mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4721 if (old_conn_state->crtc) { in drm_dp_mst_root_conn_atomic_check()
4722 crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4730 mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4739 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
4746 mst_state->total_avail_slots = 64; in drm_dp_mst_update_slots()
4747 mst_state->start_slot = 0; in drm_dp_mst_update_slots()
4749 mst_state->total_avail_slots = 63; in drm_dp_mst_update_slots()
4750 mst_state->start_slot = 1; in drm_dp_mst_update_slots()
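A short usage sketch: pick the slot layout from the negotiated channel coding, as amdgpu-style drivers do. drm_dp_is_uhbr_rate() is assumed to be available for the rate check, and link_rate stands in for the driver's negotiated rate.

static void example_update_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 int link_rate)
{
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_mst_topology_state(state, mgr);

	if (IS_ERR(mst_state))
		return;

	/* UHBR rates imply 128b/132b coding: 64 slots starting at 0;
	 * everything else keeps the legacy 63 slots starting at 1. */
	drm_dp_mst_update_slots(mst_state, drm_dp_is_uhbr_rate(link_rate) ?
				DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B);
}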
4760 * drm_dp_check_act_status() - Polls for ACT handled status.
4764 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really in drm_dp_check_act_status()
4778 return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000); in drm_dp_check_act_status()
4783 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
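A worked example of the PBN arithmetic, assuming the DP spec's 0.6% margin and the 54/64-MBytes/sec PBN unit this helper is built around:

/* 1080p60 has a 148500 kHz pixel clock, so at 24 bpp the peak rate is
 * 148500 * 3 = 445500 kB/s. Converting to PBN units:
 *
 *	445500 * 64 / 54        = 528000
 *	528000 * 1.006 (margin) = 531168
 *	531168 / 1000           = 531.2, rounded up
 *
 *	pbn = drm_dp_calc_pbn_mode(148500, 24 << 4);	yields 532
 *
 * (24 << 4 because recent kernels take bpp in 1/16th units.)
 */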
4820 queue_work(system_long_wq, &mgr->tx_work); in drm_dp_mst_kick_tx()
4849 int tabs = mstb->lct; in drm_dp_mst_dump_mstb()
4857 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports); in drm_dp_mst_dump_mstb()
4858 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_dump_mstb()
4859 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n", in drm_dp_mst_dump_mstb()
4861 port->port_num, in drm_dp_mst_dump_mstb()
4863 port->input ? "input" : "output", in drm_dp_mst_dump_mstb()
4864 pdt_to_string(port->pdt), in drm_dp_mst_dump_mstb()
4865 port->ddps, in drm_dp_mst_dump_mstb()
4866 port->ldps, in drm_dp_mst_dump_mstb()
4867 port->num_sdp_streams, in drm_dp_mst_dump_mstb()
4868 port->num_sdp_stream_sinks, in drm_dp_mst_dump_mstb()
4869 port->fec_capable ? "true" : "false", in drm_dp_mst_dump_mstb()
4870 port->connector); in drm_dp_mst_dump_mstb()
4871 if (port->mstb) in drm_dp_mst_dump_mstb()
4872 drm_dp_mst_dump_mstb(m, port->mstb); in drm_dp_mst_dump_mstb()
4884 if (drm_dp_dpcd_read(mgr->aux, in dump_dp_payload_table()
4898 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); in fetch_monitor_name()
4904 * drm_dp_mst_dump_topology(): dump topology to seq file.
4906 * @mgr: manager to dump current topology for.
4908 * helper to dump MST topology to a seq file for debugfs.
4924 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
4925 if (mgr->mst_primary) in drm_dp_mst_dump_topology()
4926 drm_dp_mst_dump_mstb(m, mgr->mst_primary); in drm_dp_mst_dump_topology()
4929 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
4931 ret = drm_modeset_lock_single_interruptible(&mgr->base.lock); in drm_dp_mst_dump_topology()
4935 state = to_drm_dp_mst_topology_state(mgr->base.state); in drm_dp_mst_dump_topology()
4938 state->payload_mask, mgr->max_payloads, state->start_slot, in drm_dp_mst_dump_topology()
4939 dfixed_trunc(state->pbn_div)); in drm_dp_mst_dump_topology()
4942 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_dump_topology()
4943 list_for_each_entry(payload, &state->payloads, next) { in drm_dp_mst_dump_topology()
4946 if (payload->vcpi != i || payload->delete) in drm_dp_mst_dump_topology()
4949 fetch_monitor_name(mgr, payload->port, name, sizeof(name)); in drm_dp_mst_dump_topology()
4950 seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n", in drm_dp_mst_dump_topology()
4952 payload->port->port_num, in drm_dp_mst_dump_topology()
4953 payload->vcpi, in drm_dp_mst_dump_topology()
4954 payload->vc_start_slot, in drm_dp_mst_dump_topology()
4955 payload->vc_start_slot + payload->time_slots - 1, in drm_dp_mst_dump_topology()
4956 payload->pbn, in drm_dp_mst_dump_topology()
4957 payload->dsc_enabled ? "Y" : "N", in drm_dp_mst_dump_topology()
4958 status[payload->payload_allocation_status], in drm_dp_mst_dump_topology()
4964 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
4965 if (mgr->mst_primary) { in drm_dp_mst_dump_topology()
4969 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) { in drm_dp_mst_dump_topology()
4975 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); in drm_dp_mst_dump_topology()
4982 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); in drm_dp_mst_dump_topology()
4990 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); in drm_dp_mst_dump_topology()
5006 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
5007 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_dump_topology()
5015 mutex_lock(&mgr->qlock); in drm_dp_tx_work()
5016 if (!list_empty(&mgr->tx_msg_downq)) in drm_dp_tx_work()
5018 mutex_unlock(&mgr->qlock); in drm_dp_tx_work()
5024 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); in drm_dp_delayed_destroy_port()
5026 if (port->connector) { in drm_dp_delayed_destroy_port()
5027 drm_connector_unregister(port->connector); in drm_dp_delayed_destroy_port()
5028 drm_connector_put(port->connector); in drm_dp_delayed_destroy_port()
5037 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_delayed_destroy_mstb()
5042 mutex_lock(&mgr->lock); in drm_dp_delayed_destroy_mstb()
5043 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) { in drm_dp_delayed_destroy_mstb()
5044 list_del(&port->next); in drm_dp_delayed_destroy_mstb()
5047 mutex_unlock(&mgr->lock); in drm_dp_delayed_destroy_mstb()
5050 mutex_lock(&mstb->mgr->qlock); in drm_dp_delayed_destroy_mstb()
5051 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) { in drm_dp_delayed_destroy_mstb()
5052 if (txmsg->dst != mstb) in drm_dp_delayed_destroy_mstb()
5055 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in drm_dp_delayed_destroy_mstb()
5056 list_del(&txmsg->next); in drm_dp_delayed_destroy_mstb()
5059 mutex_unlock(&mstb->mgr->qlock); in drm_dp_delayed_destroy_mstb()
5062 wake_up_all(&mstb->mgr->tx_waitq); in drm_dp_delayed_destroy_mstb()
5076 * connector lock before destroying the mstb/port, to avoid AB->BA in drm_dp_delayed_destroy_work()
5085 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5086 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, in drm_dp_delayed_destroy_work()
5090 list_del(&mstb->destroy_next); in drm_dp_delayed_destroy_work()
5091 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5103 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5104 port = list_first_entry_or_null(&mgr->destroy_port_list, in drm_dp_delayed_destroy_work()
5108 list_del(&port->next); in drm_dp_delayed_destroy_work()
5109 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5121 drm_kms_helper_hotplug_event(mgr->dev); in drm_dp_delayed_destroy_work()
5128 to_dp_mst_topology_state(obj->state); in drm_dp_mst_duplicate_state()
5135 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); in drm_dp_mst_duplicate_state()
5137 INIT_LIST_HEAD(&state->payloads); in drm_dp_mst_duplicate_state()
5138 state->commit_deps = NULL; in drm_dp_mst_duplicate_state()
5139 state->num_commit_deps = 0; in drm_dp_mst_duplicate_state()
5140 state->pending_crtc_mask = 0; in drm_dp_mst_duplicate_state()
5142 list_for_each_entry(pos, &old_state->payloads, next) { in drm_dp_mst_duplicate_state()
5144 if (pos->delete) in drm_dp_mst_duplicate_state()
5151 drm_dp_mst_get_port_malloc(payload->port); in drm_dp_mst_duplicate_state()
5152 list_add(&payload->next, &state->payloads); in drm_dp_mst_duplicate_state()
5155 return &state->base; in drm_dp_mst_duplicate_state()
5158 list_for_each_entry_safe(pos, payload, &state->payloads, next) { in drm_dp_mst_duplicate_state()
5159 drm_dp_mst_put_port_malloc(pos->port); in drm_dp_mst_duplicate_state()
5175 list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) { in drm_dp_mst_destroy_state()
5177 if (!pos->delete) in drm_dp_mst_destroy_state()
5178 drm_dp_mst_put_port_malloc(pos->port); in drm_dp_mst_destroy_state()
5182 for (i = 0; i < mst_state->num_commit_deps; i++) in drm_dp_mst_destroy_state()
5183 drm_crtc_commit_put(mst_state->commit_deps[i]); in drm_dp_mst_destroy_state()
5185 kfree(mst_state->commit_deps); in drm_dp_mst_destroy_state()
5192 while (port->parent) { in drm_dp_mst_port_downstream_of_branch()
5193 if (port->parent == branch) in drm_dp_mst_port_downstream_of_branch()
5196 if (port->parent->port_parent) in drm_dp_mst_port_downstream_of_branch()
5197 port = port->parent->port_parent; in drm_dp_mst_port_downstream_of_branch()
5209 if (!mgr->mst_primary) in drm_dp_mst_port_downstream_of_parent_locked()
5212 port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, in drm_dp_mst_port_downstream_of_parent_locked()
5220 parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, in drm_dp_mst_port_downstream_of_parent_locked()
5225 if (!parent->mstb) in drm_dp_mst_port_downstream_of_parent_locked()
5228 return drm_dp_mst_port_downstream_of_branch(port, parent->mstb); in drm_dp_mst_port_downstream_of_parent_locked()
5232 * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
5233 * @mgr: MST topology manager
5238 * %NULL - denoting the root port - the function returns %true if @port is in
5239 * @mgr's topology.
5248 mutex_lock(&mgr->lock); in drm_dp_mst_port_downstream_of_parent()
5250 mutex_unlock(&mgr->lock); in drm_dp_mst_port_downstream_of_parent()
5274 list_for_each_entry(payload, &state->payloads, next) { in drm_dp_mst_atomic_check_mstb_bw_limit()
5275 if (!payload->pbn || in drm_dp_mst_atomic_check_mstb_bw_limit()
5276 !drm_dp_mst_port_downstream_of_branch(payload->port, mstb)) in drm_dp_mst_atomic_check_mstb_bw_limit()
5285 if (mstb->port_parent) in drm_dp_mst_atomic_check_mstb_bw_limit()
5286 drm_dbg_atomic(mstb->mgr->dev, in drm_dp_mst_atomic_check_mstb_bw_limit()
5288 mstb->port_parent->parent, mstb->port_parent, mstb); in drm_dp_mst_atomic_check_mstb_bw_limit()
5290 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); in drm_dp_mst_atomic_check_mstb_bw_limit()
5292 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_atomic_check_mstb_bw_limit()
5311 if (port->pdt == DP_PEER_DEVICE_NONE) in drm_dp_mst_atomic_check_port_bw_limit()
5314 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_mst_atomic_check_port_bw_limit()
5323 if (!port->full_pbn) { in drm_dp_mst_atomic_check_port_bw_limit()
5324 drm_dbg_atomic(port->mgr->dev, in drm_dp_mst_atomic_check_port_bw_limit()
5326 port->parent, port); in drm_dp_mst_atomic_check_port_bw_limit()
5328 return -EINVAL; in drm_dp_mst_atomic_check_port_bw_limit()
5331 pbn_used = payload->pbn; in drm_dp_mst_atomic_check_port_bw_limit()
5333 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, in drm_dp_mst_atomic_check_port_bw_limit()
5340 if (pbn_used > port->full_pbn) { in drm_dp_mst_atomic_check_port_bw_limit()
5341 drm_dbg_atomic(port->mgr->dev, in drm_dp_mst_atomic_check_port_bw_limit()
5343 port->parent, port, pbn_used, port->full_pbn); in drm_dp_mst_atomic_check_port_bw_limit()
5345 return -ENOSPC; in drm_dp_mst_atomic_check_port_bw_limit()
5348 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", in drm_dp_mst_atomic_check_port_bw_limit()
5349 port->parent, port, pbn_used, port->full_pbn); in drm_dp_mst_atomic_check_port_bw_limit()
5359 int avail_slots = mst_state->total_avail_slots, payload_count = 0; in drm_dp_mst_atomic_check_payload_alloc_limits()
5361 list_for_each_entry(payload, &mst_state->payloads, next) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5362 /* Releasing payloads is always OK, even if the port is gone */ in drm_dp_mst_atomic_check_payload_alloc_limits()
5363 if (payload->delete) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5364 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5365 payload->port); in drm_dp_mst_atomic_check_payload_alloc_limits()
5369 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5370 payload->port, payload->time_slots); in drm_dp_mst_atomic_check_payload_alloc_limits()
5372 avail_slots -= payload->time_slots; in drm_dp_mst_atomic_check_payload_alloc_limits()
5374 drm_dbg_atomic(mgr->dev, in drm_dp_mst_atomic_check_payload_alloc_limits()
5376 payload->port, mst_state, avail_slots + payload->time_slots); in drm_dp_mst_atomic_check_payload_alloc_limits()
5377 return -ENOSPC; in drm_dp_mst_atomic_check_payload_alloc_limits()
5380 if (++payload_count > mgr->max_payloads) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5381 drm_dbg_atomic(mgr->dev, in drm_dp_mst_atomic_check_payload_alloc_limits()
5383 mgr, mst_state, mgr->max_payloads); in drm_dp_mst_atomic_check_payload_alloc_limits()
5384 return -EINVAL; in drm_dp_mst_atomic_check_payload_alloc_limits()
5388 if (!payload->vcpi) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5389 payload->vcpi = ffz(mst_state->payload_mask) + 1; in drm_dp_mst_atomic_check_payload_alloc_limits()
5390 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5391 payload->port, payload->vcpi); in drm_dp_mst_atomic_check_payload_alloc_limits()
5392 mst_state->payload_mask |= BIT(payload->vcpi - 1); in drm_dp_mst_atomic_check_payload_alloc_limits()
5397 mst_state->pbn_div.full = dfixed_const(0); in drm_dp_mst_atomic_check_payload_alloc_limits()
5399 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5400 mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots, in drm_dp_mst_atomic_check_payload_alloc_limits()
5401 mst_state->total_avail_slots - avail_slots); in drm_dp_mst_atomic_check_payload_alloc_limits()
5409 * @mgr: MST topology manager
5411 * Whenever there is a change in mst topology
5414 * CRTCs in that topology
5433 list_for_each_entry(pos, &mst_state->payloads, next) { in drm_dp_mst_add_affected_dsc_crtcs()
5435 connector = pos->port->connector; in drm_dp_mst_add_affected_dsc_crtcs()
5438 return -EINVAL; in drm_dp_mst_add_affected_dsc_crtcs()
5445 crtc = conn_state->crtc; in drm_dp_mst_add_affected_dsc_crtcs()
5450 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) in drm_dp_mst_add_affected_dsc_crtcs()
5453 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); in drm_dp_mst_add_affected_dsc_crtcs()
5458 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", in drm_dp_mst_add_affected_dsc_crtcs()
5461 crtc_state->mode_changed = true; in drm_dp_mst_add_affected_dsc_crtcs()
5468 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5488 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); in drm_dp_mst_atomic_enable_dsc()
5494 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5497 return -EINVAL; in drm_dp_mst_atomic_enable_dsc()
5500 if (payload->dsc_enabled == enable) { in drm_dp_mst_atomic_enable_dsc()
5501 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5503 port, enable, payload->time_slots); in drm_dp_mst_atomic_enable_dsc()
5504 time_slots = payload->time_slots; in drm_dp_mst_atomic_enable_dsc()
5508 time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn); in drm_dp_mst_atomic_enable_dsc()
5509 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5513 return -EINVAL; in drm_dp_mst_atomic_enable_dsc()
5516 payload->dsc_enabled = enable; in drm_dp_mst_atomic_enable_dsc()
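A minimal sketch of toggling DSC from a driver's atomic_check, assuming the PBN passed in was already recomputed for the compressed rate.

static int example_enable_dsc(struct drm_atomic_state *state,
			      struct drm_dp_mst_port *port, int dsc_pbn)
{
	/* Re-states the port's time-slot demand for the compressed PBN and
	 * marks the payload DSC-enabled; returns the new slot count. */
	return drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
}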
5523 * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
5529 * Checks the given MST manager's topology state for an atomic update to ensure
5543 * - 0 if the new state is valid
5544 * - %-ENOSPC, if the new state is invalid, because of BW limitation
5547 * - The non-root port where a BW limit check failed
5552 * - %NULL if the BW limit check failed at the root port
5556 * - %-EINVAL, if the new state is invalid, because the root port has
5568 if (!mgr->mst_state) in drm_dp_mst_atomic_check_mgr()
5571 mutex_lock(&mgr->lock); in drm_dp_mst_atomic_check_mgr()
5572 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, in drm_dp_mst_atomic_check_mgr()
5575 mutex_unlock(&mgr->lock); in drm_dp_mst_atomic_check_mgr()
5585 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5589 * Checks the given topology state for an atomic update to ensure that it's
5631 * drm_atomic_get_mst_topology_state: get MST topology state
5633 * @mgr: MST topology manager, also the private object in this case
5637 * topology object.
5640 * The MST topology state or error pointer.
5645 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); in drm_atomic_get_mst_topology_state()
5650 * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
5652 * @mgr: MST topology manager, also the private object in this case
5656 * topology object.
5659 * The old MST topology state, or NULL if there's no topology state for this MST mgr
5667 drm_atomic_get_old_private_obj_state(state, &mgr->base); in drm_atomic_get_old_mst_topology_state()
5674 * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
5676 * @mgr: MST topology manager, also the private object in this case
5680 * topology object.
5683 * The new MST topology state, or NULL if there's no topology state for this MST mgr
5691 drm_atomic_get_new_private_obj_state(state, &mgr->base); in drm_atomic_get_new_mst_topology_state()
5698 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5700 * @dev: device providing this structure - for i2c addition.
5715 mutex_init(&mgr->lock); in drm_dp_mst_topology_mgr_init()
5716 mutex_init(&mgr->qlock); in drm_dp_mst_topology_mgr_init()
5717 mutex_init(&mgr->delayed_destroy_lock); in drm_dp_mst_topology_mgr_init()
5718 mutex_init(&mgr->up_req_lock); in drm_dp_mst_topology_mgr_init()
5719 mutex_init(&mgr->probe_lock); in drm_dp_mst_topology_mgr_init()
5721 mutex_init(&mgr->topology_ref_history_lock); in drm_dp_mst_topology_mgr_init()
5724 INIT_LIST_HEAD(&mgr->tx_msg_downq); in drm_dp_mst_topology_mgr_init()
5725 INIT_LIST_HEAD(&mgr->destroy_port_list); in drm_dp_mst_topology_mgr_init()
5726 INIT_LIST_HEAD(&mgr->destroy_branch_device_list); in drm_dp_mst_topology_mgr_init()
5727 INIT_LIST_HEAD(&mgr->up_req_list); in drm_dp_mst_topology_mgr_init()
5731 * requeuing will also be flushed when deinitializing the topology manager. in drm_dp_mst_topology_mgr_init()
5733 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0); in drm_dp_mst_topology_mgr_init()
5734 if (mgr->delayed_destroy_wq == NULL) in drm_dp_mst_topology_mgr_init()
5735 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5737 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); in drm_dp_mst_topology_mgr_init()
5738 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); in drm_dp_mst_topology_mgr_init()
5739 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); in drm_dp_mst_topology_mgr_init()
5740 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); in drm_dp_mst_topology_mgr_init()
5741 init_waitqueue_head(&mgr->tx_waitq); in drm_dp_mst_topology_mgr_init()
5742 mgr->dev = dev; in drm_dp_mst_topology_mgr_init()
5743 mgr->aux = aux; in drm_dp_mst_topology_mgr_init()
5744 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; in drm_dp_mst_topology_mgr_init()
5745 mgr->max_payloads = max_payloads; in drm_dp_mst_topology_mgr_init()
5746 mgr->conn_base_id = conn_base_id; in drm_dp_mst_topology_mgr_init()
5750 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5752 mst_state->total_avail_slots = 63; in drm_dp_mst_topology_mgr_init()
5753 mst_state->start_slot = 1; in drm_dp_mst_topology_mgr_init()
5755 mst_state->mgr = mgr; in drm_dp_mst_topology_mgr_init()
5756 INIT_LIST_HEAD(&mst_state->payloads); in drm_dp_mst_topology_mgr_init()
5758 drm_atomic_private_obj_init(dev, &mgr->base, in drm_dp_mst_topology_mgr_init()
5759 &mst_state->base, in drm_dp_mst_topology_mgr_init()
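Typical bring-up, as a hedged sketch: struct example_dp and its members are invented, and the numeric arguments are common but driver-specific choices matching the parameters stored by the init function above.

struct example_dp {			/* hypothetical driver state */
	struct drm_device *drm;
	struct drm_dp_aux aux;
	struct drm_dp_mst_topology_mgr mst_mgr;
	int conn_base_id;
};

static int example_mst_init(struct example_dp *dp)
{
	/* 16-byte DPCD transfers and 8 payloads are common choices;
	 * conn_base_id seeds the IDs of the MST connectors created later.
	 */
	return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dp->drm, &dp->aux,
					    16, 8, dp->conn_base_id);
}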
5767 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5773 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_destroy()
5775 if (mgr->delayed_destroy_wq) { in drm_dp_mst_topology_mgr_destroy()
5776 destroy_workqueue(mgr->delayed_destroy_wq); in drm_dp_mst_topology_mgr_destroy()
5777 mgr->delayed_destroy_wq = NULL; in drm_dp_mst_topology_mgr_destroy()
5779 mgr->dev = NULL; in drm_dp_mst_topology_mgr_destroy()
5780 mgr->aux = NULL; in drm_dp_mst_topology_mgr_destroy()
5781 drm_atomic_private_obj_fini(&mgr->base); in drm_dp_mst_topology_mgr_destroy()
5782 mgr->funcs = NULL; in drm_dp_mst_topology_mgr_destroy()
5784 mutex_destroy(&mgr->delayed_destroy_lock); in drm_dp_mst_topology_mgr_destroy()
5785 mutex_destroy(&mgr->qlock); in drm_dp_mst_topology_mgr_destroy()
5786 mutex_destroy(&mgr->lock); in drm_dp_mst_topology_mgr_destroy()
5787 mutex_destroy(&mgr->up_req_lock); in drm_dp_mst_topology_mgr_destroy()
5788 mutex_destroy(&mgr->probe_lock); in drm_dp_mst_topology_mgr_destroy()
5790 mutex_destroy(&mgr->topology_ref_history_lock); in drm_dp_mst_topology_mgr_destroy()
5799 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) in remote_i2c_read_ok()
5802 for (i = 0; i < num - 1; i++) { in remote_i2c_read_ok()
5808 return msgs[num - 1].flags & I2C_M_RD && in remote_i2c_read_ok()
5809 msgs[num - 1].len <= 0xff; in remote_i2c_read_ok()
5816 for (i = 0; i < num - 1; i++) { in remote_i2c_write_ok()
5822 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff; in remote_i2c_write_ok()
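Concretely, the two validators above only pass a narrow transaction shape: for reads, at most DP_REMOTE_I2C_READ_MAX_TRANSACTIONS short writes followed by exactly one read, with every message no longer than 0xff bytes. A plain EDID block read fits the pattern (values below are illustrative):

u8 offset = 0;
u8 block[128];
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = 0,        .len = 1,             .buf = &offset },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(block), .buf = block },
};
/* An i2c_transfer() of these msgs on the port's registered adapter is
 * routed to drm_dp_mst_i2c_read() below.
 */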
5829 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_i2c_read()
5837 msg.u.i2c_read.num_transactions = num - 1; in drm_dp_mst_i2c_read()
5838 msg.u.i2c_read.port_number = port->port_num; in drm_dp_mst_i2c_read()
5839 for (i = 0; i < num - 1; i++) { in drm_dp_mst_i2c_read()
5845 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; in drm_dp_mst_i2c_read()
5846 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; in drm_dp_mst_i2c_read()
5850 ret = -ENOMEM; in drm_dp_mst_i2c_read()
5854 txmsg->dst = mstb; in drm_dp_mst_i2c_read()
5862 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_mst_i2c_read()
5863 ret = -EREMOTEIO; in drm_dp_mst_i2c_read()
5866 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { in drm_dp_mst_i2c_read()
5867 ret = -EIO; in drm_dp_mst_i2c_read()
5870 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); in drm_dp_mst_i2c_read()
5882 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_i2c_write()
5890 ret = -ENOMEM; in drm_dp_mst_i2c_write()
5896 msg.u.i2c_write.port_number = port->port_num; in drm_dp_mst_i2c_write()
5902 txmsg->dst = mstb; in drm_dp_mst_i2c_write()
5909 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_mst_i2c_write()
5910 ret = -EREMOTEIO; in drm_dp_mst_i2c_write()
5923 /* I2C device */
5927 struct drm_dp_aux *aux = adapter->algo_data; in drm_dp_mst_i2c_xfer()
5931 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_i2c_xfer()
5934 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_mst_i2c_xfer()
5936 return -EREMOTEIO; in drm_dp_mst_i2c_xfer()
5943 drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n"); in drm_dp_mst_i2c_xfer()
5944 ret = -EIO; in drm_dp_mst_i2c_xfer()
5965 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5966 * @port: The port to add the I2C bus on
5972 struct drm_dp_aux *aux = &port->aux; in drm_dp_mst_register_i2c_bus()
5973 struct device *parent_dev = port->mgr->dev->dev; in drm_dp_mst_register_i2c_bus()
5975 aux->ddc.algo = &drm_dp_mst_i2c_algo; in drm_dp_mst_register_i2c_bus()
5976 aux->ddc.algo_data = aux; in drm_dp_mst_register_i2c_bus()
5977 aux->ddc.retries = 3; in drm_dp_mst_register_i2c_bus()
5979 aux->ddc.owner = THIS_MODULE; in drm_dp_mst_register_i2c_bus()
5981 aux->ddc.dev.parent = parent_dev; in drm_dp_mst_register_i2c_bus()
5982 aux->ddc.dev.of_node = parent_dev->of_node; in drm_dp_mst_register_i2c_bus()
5984 strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev), in drm_dp_mst_register_i2c_bus()
5985 sizeof(aux->ddc.name)); in drm_dp_mst_register_i2c_bus()
5987 return i2c_add_adapter(&aux->ddc); in drm_dp_mst_register_i2c_bus()
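Registration is driven from inside the MST helpers when a port's peer device type is probed, so drivers normally just consume the resulting adapter. A hedged sketch of such a consumer, assuming a connector and a validated port are at hand:

/* Once registered, the port's DDC channel behaves like any other I2C bus;
 * drm_get_edid() is the classic consumer (drm_edid_read_ddc() in newer
 * code).
 */
struct edid *edid = drm_get_edid(connector, &port->aux.ddc);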
5991 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5992 * @port: The port to remove the I2C bus from
5996 i2c_del_adapter(&port->aux.ddc); in drm_dp_mst_unregister_i2c_bus()
6000 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
6003 * A single physical MST hub object can be represented in the topology
6010 * May acquire mgr->lock
6019 if (!port || port->dpcd_rev < DP_DPCD_REV_14) in drm_dp_mst_is_virtual_dpcd()
6026 /* DP-to-HDMI Protocol Converter */ in drm_dp_mst_is_virtual_dpcd()
6027 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV && in drm_dp_mst_is_virtual_dpcd()
6028 !port->mcs && in drm_dp_mst_is_virtual_dpcd()
6029 port->ldps) in drm_dp_mst_is_virtual_dpcd()
6032 /* DP-to-DP */ in drm_dp_mst_is_virtual_dpcd()
6033 mutex_lock(&port->mgr->lock); in drm_dp_mst_is_virtual_dpcd()
6034 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && in drm_dp_mst_is_virtual_dpcd()
6035 port->mstb && in drm_dp_mst_is_virtual_dpcd()
6036 port->mstb->num_ports == 2) { in drm_dp_mst_is_virtual_dpcd()
6037 list_for_each_entry(downstream_port, &port->mstb->ports, next) { in drm_dp_mst_is_virtual_dpcd()
6038 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK && in drm_dp_mst_is_virtual_dpcd()
6039 !downstream_port->input) { in drm_dp_mst_is_virtual_dpcd()
6040 mutex_unlock(&port->mgr->lock); in drm_dp_mst_is_virtual_dpcd()
6045 mutex_unlock(&port->mgr->lock); in drm_dp_mst_is_virtual_dpcd()
6051 * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
6059 if (!port->parent || !port->parent->port_parent) in drm_dp_mst_aux_for_parent()
6062 return &port->parent->port_parent->aux; in drm_dp_mst_aux_for_parent()
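A hedged sketch of the fallback this helper implies: a NULL return means the parent is the primary branch device, which is addressed through the topology manager's physical AUX channel. The wrapper name is invented.

static struct drm_dp_aux *example_parent_aux(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = drm_dp_mst_aux_for_parent(port);

	/* NULL => the parent is the root branch device; talk to it via the
	 * topology manager's physical AUX channel instead.
	 */
	return aux ?: port->mgr->aux;
}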
6067 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
6095 if (port->parent->port_parent) in drm_dp_mst_dsc_aux_for_port()
6096 immediate_upstream_port = port->parent->port_parent; in drm_dp_mst_dsc_aux_for_port()
6107 !fec_port->fec_capable) in drm_dp_mst_dsc_aux_for_port()
6110 fec_port = fec_port->parent->port_parent; in drm_dp_mst_dsc_aux_for_port()
6113 /* DP-to-DP peer device */ in drm_dp_mst_dsc_aux_for_port()
6115 if (drm_dp_dpcd_read(&port->aux, in drm_dp_mst_dsc_aux_for_port()
6118 if (drm_dp_dpcd_read(&port->aux, in drm_dp_mst_dsc_aux_for_port()
6121 if (drm_dp_dpcd_read(&immediate_upstream_port->aux, in drm_dp_mst_dsc_aux_for_port()
6125 /* Endpoint decompression with DP-to-DP peer device */ in drm_dp_mst_dsc_aux_for_port()
6129 port->passthrough_aux = &immediate_upstream_port->aux; in drm_dp_mst_dsc_aux_for_port()
6130 return &port->aux; in drm_dp_mst_dsc_aux_for_port()
6133 /* Virtual DPCD decompression with DP-to-DP peer device */ in drm_dp_mst_dsc_aux_for_port()
6134 return &immediate_upstream_port->aux; in drm_dp_mst_dsc_aux_for_port()
6137 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */ in drm_dp_mst_dsc_aux_for_port()
6139 return &port->aux; in drm_dp_mst_dsc_aux_for_port()
6144 * - Physical aux has Synaptics OUI in drm_dp_mst_dsc_aux_for_port()
6145 * - DPv1.4 or higher in drm_dp_mst_dsc_aux_for_port()
6146 * - Port is on primary branch device in drm_dp_mst_dsc_aux_for_port()
6147 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG) in drm_dp_mst_dsc_aux_for_port()
6150 immediate_upstream_aux = &immediate_upstream_port->aux; in drm_dp_mst_dsc_aux_for_port()
6152 immediate_upstream_aux = port->mgr->aux; in drm_dp_mst_dsc_aux_for_port()
6179 * connected to the GPU is capable of DSC - in drm_dp_mst_dsc_aux_for_port()
6183 if (drm_dp_dpcd_read(&port->aux, in drm_dp_mst_dsc_aux_for_port()
6186 if (drm_dp_dpcd_read(&port->aux, in drm_dp_mst_dsc_aux_for_port()
6191 return &port->aux; in drm_dp_mst_dsc_aux_for_port()
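Putting the lookup to use, as a hedged sketch: example_enable_sink_dsc() is invented, while drm_dp_mst_dsc_aux_for_port(), drm_dp_dpcd_writeb(), DP_DSC_ENABLE, and DP_DECOMPRESSION_EN are real kernel APIs and DPCD definitions.

static int example_enable_sink_dsc(struct drm_dp_mst_port *port, bool enable)
{
	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	ssize_t ret;

	if (!dsc_aux)
		return -ENODEV;

	/* Write the DSC enable bit wherever the lookup decided decompression
	 * happens: the endpoint itself, a DP-to-DP peer, or a virtual DPCD.
	 */
	ret = drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	return ret < 0 ? ret : 0;
}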