Lines Matching +full:i2c +full:- +full:topology

26 #include <linux/i2c.h>
57 * protocol. The helpers contain a topology manager and bandwidth manager.
207 number_of_bits--; in drm_dp_msg_header_crc4()
211 bitshift--; in drm_dp_msg_header_crc4()
223 number_of_bits--; in drm_dp_msg_header_crc4()
241 number_of_bits--; in drm_dp_msg_data_crc4()
245 bitshift--; in drm_dp_msg_data_crc4()
257 number_of_bits--; in drm_dp_msg_data_crc4()
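
The number_of_bits--/bitshift-- lines above are the only hits inside two bit-serial CRC loops that the search elides. A minimal userspace sketch of the header CRC, assuming the 4-bit divisor 0x13 (x^4 + x + 1) applied over the header nibbles; drm_dp_msg_data_crc4() is the analogous 8-bit loop with divisor 0xd5:

    #include <stddef.h>
    #include <stdint.h>

    /* Bit-serial long division, as in drm_dp_msg_header_crc4(): shift
     * message bits into a 4-bit remainder, reducing by 0x13 whenever
     * bit 4 is set, then flush four zero bits to finish the division. */
    static uint8_t sb_hdr_crc4(const uint8_t *data, size_t num_nibbles)
    {
            int number_of_bits = num_nibbles * 4;
            uint8_t bitmask = 0x80, bitshift = 7, rem = 0;
            size_t i = 0;

            while (number_of_bits--) {
                    rem = (rem << 1) | ((data[i] & bitmask) >> bitshift);
                    bitmask >>= 1;
                    bitshift--;
                    if (!bitmask) {
                            bitmask = 0x80;
                            bitshift = 7;
                            i++;
                    }
                    if (rem & 0x10)
                            rem ^= 0x13;
            }
            for (number_of_bits = 4; number_of_bits--;) {
                    rem <<= 1;
                    if (rem & 0x10)
                            rem ^= 0x13;
            }
            return rem;     /* low nibble is the CRC */
    }
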
269 size += (hdr->lct / 2); in drm_dp_calc_sb_hdr_size()
280 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); in drm_dp_encode_sideband_msg_hdr()
281 for (i = 0; i < (hdr->lct / 2); i++) in drm_dp_encode_sideband_msg_hdr()
282 buf[idx++] = hdr->rad[i]; in drm_dp_encode_sideband_msg_hdr()
283 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | in drm_dp_encode_sideband_msg_hdr()
284 (hdr->msg_len & 0x3f); in drm_dp_encode_sideband_msg_hdr()
285 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); in drm_dp_encode_sideband_msg_hdr()
287 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); in drm_dp_encode_sideband_msg_hdr()
288 buf[idx - 1] |= (crc4 & 0xf); in drm_dp_encode_sideband_msg_hdr()
308 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); in drm_dp_decode_sideband_msg_hdr()
310 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { in drm_dp_decode_sideband_msg_hdr()
311 drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); in drm_dp_decode_sideband_msg_hdr()
315 hdr->lct = (buf[0] & 0xf0) >> 4; in drm_dp_decode_sideband_msg_hdr()
316 hdr->lcr = (buf[0] & 0xf); in drm_dp_decode_sideband_msg_hdr()
318 for (i = 0; i < (hdr->lct / 2); i++) in drm_dp_decode_sideband_msg_hdr()
319 hdr->rad[i] = buf[idx++]; in drm_dp_decode_sideband_msg_hdr()
320 hdr->broadcast = (buf[idx] >> 7) & 0x1; in drm_dp_decode_sideband_msg_hdr()
321 hdr->path_msg = (buf[idx] >> 6) & 0x1; in drm_dp_decode_sideband_msg_hdr()
322 hdr->msg_len = buf[idx] & 0x3f; in drm_dp_decode_sideband_msg_hdr()
324 hdr->somt = (buf[idx] >> 7) & 0x1; in drm_dp_decode_sideband_msg_hdr()
325 hdr->eomt = (buf[idx] >> 6) & 0x1; in drm_dp_decode_sideband_msg_hdr()
326 hdr->seqno = (buf[idx] >> 4) & 0x1; in drm_dp_decode_sideband_msg_hdr()
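
Taken together, the encode/decode pair above pins down the sideband header layout: byte 0 holds LCT in the high nibble and LCR in the low nibble, followed by lct/2 RAD bytes (one 4-bit hop per nibble), then a byte packing broadcast, path_msg and the 6-bit msg_len, and a final byte with somt/eomt/seqno in the high bits and the CRC-4 of all preceding nibbles in the low nibble. Illustrative example (values invented): a unicast header with lct=1, lcr=0, msg_len=6, somt=eomt=1, seqno=0 encodes to the three bytes 0x10, 0x06, 0xc0 | crc4, matching the size 3 + lct/2 computed above.
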
338 u8 *buf = raw->msg; in drm_dp_encode_sideband_req()
340 buf[idx++] = req->req_type & 0x7f; in drm_dp_encode_sideband_req()
342 switch (req->req_type) { in drm_dp_encode_sideband_req()
346 buf[idx] = (req->u.port_num.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
350 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | in drm_dp_encode_sideband_req()
351 (req->u.allocate_payload.number_sdp_streams & 0xf); in drm_dp_encode_sideband_req()
353 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); in drm_dp_encode_sideband_req()
355 buf[idx] = (req->u.allocate_payload.pbn >> 8); in drm_dp_encode_sideband_req()
357 buf[idx] = (req->u.allocate_payload.pbn & 0xff); in drm_dp_encode_sideband_req()
359 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { in drm_dp_encode_sideband_req()
360 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | in drm_dp_encode_sideband_req()
361 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); in drm_dp_encode_sideband_req()
364 if (req->u.allocate_payload.number_sdp_streams & 1) { in drm_dp_encode_sideband_req()
365 i = req->u.allocate_payload.number_sdp_streams - 1; in drm_dp_encode_sideband_req()
366 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; in drm_dp_encode_sideband_req()
371 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
373 buf[idx] = (req->u.query_payload.vcpi & 0x7f); in drm_dp_encode_sideband_req()
377 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
378 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; in drm_dp_encode_sideband_req()
380 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; in drm_dp_encode_sideband_req()
382 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); in drm_dp_encode_sideband_req()
384 buf[idx] = (req->u.dpcd_read.num_bytes); in drm_dp_encode_sideband_req()
389 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
390 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; in drm_dp_encode_sideband_req()
392 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; in drm_dp_encode_sideband_req()
394 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); in drm_dp_encode_sideband_req()
396 buf[idx] = (req->u.dpcd_write.num_bytes); in drm_dp_encode_sideband_req()
398 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); in drm_dp_encode_sideband_req()
399 idx += req->u.dpcd_write.num_bytes; in drm_dp_encode_sideband_req()
402 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
403 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); in drm_dp_encode_sideband_req()
405 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { in drm_dp_encode_sideband_req()
406 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; in drm_dp_encode_sideband_req()
408 buf[idx] = req->u.i2c_read.transactions[i].num_bytes; in drm_dp_encode_sideband_req()
410 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); in drm_dp_encode_sideband_req()
411 idx += req->u.i2c_read.transactions[i].num_bytes; in drm_dp_encode_sideband_req()
413 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; in drm_dp_encode_sideband_req()
414 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); in drm_dp_encode_sideband_req()
417 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; in drm_dp_encode_sideband_req()
419 buf[idx] = (req->u.i2c_read.num_bytes_read); in drm_dp_encode_sideband_req()
424 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; in drm_dp_encode_sideband_req()
426 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; in drm_dp_encode_sideband_req()
428 buf[idx] = (req->u.i2c_write.num_bytes); in drm_dp_encode_sideband_req()
430 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); in drm_dp_encode_sideband_req()
431 idx += req->u.i2c_write.num_bytes; in drm_dp_encode_sideband_req()
436 msg = &req->u.enc_status; in drm_dp_encode_sideband_req()
437 buf[idx] = msg->stream_id; in drm_dp_encode_sideband_req()
439 memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id)); in drm_dp_encode_sideband_req()
440 idx += sizeof(msg->client_id); in drm_dp_encode_sideband_req()
442 buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event); in drm_dp_encode_sideband_req()
443 buf[idx] |= msg->valid_stream_event ? BIT(2) : 0; in drm_dp_encode_sideband_req()
444 buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior); in drm_dp_encode_sideband_req()
445 buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0; in drm_dp_encode_sideband_req()
450 raw->cur_len = idx; in drm_dp_encode_sideband_req()
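
To make the marshalling concrete, here is a hedged standalone re-encoding of just the DPCD read arm above (the helper name and the 0x20 opcode for DP_REMOTE_DPCD_READ are assumptions for illustration):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: serialize a remote DPCD read request body, mirroring the
     * dpcd_read arm of drm_dp_encode_sideband_req() above. */
    static size_t encode_remote_dpcd_read(uint8_t *buf, uint8_t port,
                                          uint32_t addr, uint8_t num_bytes)
    {
            size_t idx = 0;

            buf[idx++] = 0x20;                  /* DP_REMOTE_DPCD_READ */
            buf[idx]   = (port & 0xf) << 4;     /* port in high nibble */
            buf[idx++] |= (addr >> 16) & 0xf;   /* DPCD address is 20 bits */
            buf[idx++] = (addr >> 8) & 0xff;
            buf[idx++] = addr & 0xff;
            buf[idx++] = num_bytes;
            return idx;                         /* 5 body bytes */
    }

For port 1, address 0x21000 and 16 bytes this yields 0x20 0x12 0x10 0x00 0x10, which the decode arm below reverses field by field.
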
459 const u8 *buf = raw->msg; in drm_dp_decode_sideband_req()
462 req->req_type = buf[idx++] & 0x7f; in drm_dp_decode_sideband_req()
463 switch (req->req_type) { in drm_dp_decode_sideband_req()
467 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
472 &req->u.allocate_payload; in drm_dp_decode_sideband_req()
474 a->number_sdp_streams = buf[idx] & 0xf; in drm_dp_decode_sideband_req()
475 a->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
478 a->vcpi = buf[idx] & 0x7f; in drm_dp_decode_sideband_req()
480 a->pbn = buf[++idx] << 8; in drm_dp_decode_sideband_req()
481 a->pbn |= buf[++idx]; in drm_dp_decode_sideband_req()
484 for (i = 0; i < a->number_sdp_streams; i++) { in drm_dp_decode_sideband_req()
485 a->sdp_stream_sink[i] = in drm_dp_decode_sideband_req()
491 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
493 req->u.query_payload.vcpi = buf[idx] & 0x7f; in drm_dp_decode_sideband_req()
497 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read; in drm_dp_decode_sideband_req()
499 r->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
501 r->dpcd_address = (buf[idx] << 16) & 0xf0000; in drm_dp_decode_sideband_req()
502 r->dpcd_address |= (buf[++idx] << 8) & 0xff00; in drm_dp_decode_sideband_req()
503 r->dpcd_address |= buf[++idx] & 0xff; in drm_dp_decode_sideband_req()
505 r->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
511 &req->u.dpcd_write; in drm_dp_decode_sideband_req()
513 w->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
515 w->dpcd_address = (buf[idx] << 16) & 0xf0000; in drm_dp_decode_sideband_req()
516 w->dpcd_address |= (buf[++idx] << 8) & 0xff00; in drm_dp_decode_sideband_req()
517 w->dpcd_address |= buf[++idx] & 0xff; in drm_dp_decode_sideband_req()
519 w->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
521 w->bytes = kmemdup(&buf[++idx], w->num_bytes, in drm_dp_decode_sideband_req()
523 if (!w->bytes) in drm_dp_decode_sideband_req()
524 return -ENOMEM; in drm_dp_decode_sideband_req()
529 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read; in drm_dp_decode_sideband_req()
533 r->num_transactions = buf[idx] & 0x3; in drm_dp_decode_sideband_req()
534 r->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
535 for (i = 0; i < r->num_transactions; i++) { in drm_dp_decode_sideband_req()
536 tx = &r->transactions[i]; in drm_dp_decode_sideband_req()
538 tx->i2c_dev_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
539 tx->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
540 tx->bytes = kmemdup(&buf[++idx], in drm_dp_decode_sideband_req()
541 tx->num_bytes, in drm_dp_decode_sideband_req()
543 if (!tx->bytes) { in drm_dp_decode_sideband_req()
547 idx += tx->num_bytes; in drm_dp_decode_sideband_req()
548 tx->no_stop_bit = (buf[idx] >> 5) & 0x1; in drm_dp_decode_sideband_req()
549 tx->i2c_transaction_delay = buf[idx] & 0xf; in drm_dp_decode_sideband_req()
553 for (i = 0; i < r->num_transactions; i++) { in drm_dp_decode_sideband_req()
554 tx = &r->transactions[i]; in drm_dp_decode_sideband_req()
555 kfree(tx->bytes); in drm_dp_decode_sideband_req()
557 return -ENOMEM; in drm_dp_decode_sideband_req()
560 r->read_i2c_device_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
561 r->num_bytes_read = buf[++idx]; in drm_dp_decode_sideband_req()
566 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write; in drm_dp_decode_sideband_req()
568 w->port_number = (buf[idx] >> 4) & 0xf; in drm_dp_decode_sideband_req()
569 w->write_i2c_device_id = buf[++idx] & 0x7f; in drm_dp_decode_sideband_req()
570 w->num_bytes = buf[++idx]; in drm_dp_decode_sideband_req()
571 w->bytes = kmemdup(&buf[++idx], w->num_bytes, in drm_dp_decode_sideband_req()
573 if (!w->bytes) in drm_dp_decode_sideband_req()
574 return -ENOMEM; in drm_dp_decode_sideband_req()
578 req->u.enc_status.stream_id = buf[idx++]; in drm_dp_decode_sideband_req()
579 for (i = 0; i < sizeof(req->u.enc_status.client_id); i++) in drm_dp_decode_sideband_req()
580 req->u.enc_status.client_id[i] = buf[idx++]; in drm_dp_decode_sideband_req()
582 req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0), in drm_dp_decode_sideband_req()
584 req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2), in drm_dp_decode_sideband_req()
586 req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3), in drm_dp_decode_sideband_req()
588 req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5), in drm_dp_decode_sideband_req()
604 if (req->req_type == DP_LINK_ADDRESS) { in drm_dp_dump_sideband_msg_req_body()
606 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type)); in drm_dp_dump_sideband_msg_req_body()
610 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type)); in drm_dp_dump_sideband_msg_req_body()
613 switch (req->req_type) { in drm_dp_dump_sideband_msg_req_body()
617 P("port=%d\n", req->u.port_num.port_number); in drm_dp_dump_sideband_msg_req_body()
621 req->u.allocate_payload.port_number, in drm_dp_dump_sideband_msg_req_body()
622 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn, in drm_dp_dump_sideband_msg_req_body()
623 req->u.allocate_payload.number_sdp_streams, in drm_dp_dump_sideband_msg_req_body()
624 req->u.allocate_payload.number_sdp_streams, in drm_dp_dump_sideband_msg_req_body()
625 req->u.allocate_payload.sdp_stream_sink); in drm_dp_dump_sideband_msg_req_body()
629 req->u.query_payload.port_number, in drm_dp_dump_sideband_msg_req_body()
630 req->u.query_payload.vcpi); in drm_dp_dump_sideband_msg_req_body()
634 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address, in drm_dp_dump_sideband_msg_req_body()
635 req->u.dpcd_read.num_bytes); in drm_dp_dump_sideband_msg_req_body()
639 req->u.dpcd_write.port_number, in drm_dp_dump_sideband_msg_req_body()
640 req->u.dpcd_write.dpcd_address, in drm_dp_dump_sideband_msg_req_body()
641 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes, in drm_dp_dump_sideband_msg_req_body()
642 req->u.dpcd_write.bytes); in drm_dp_dump_sideband_msg_req_body()
646 req->u.i2c_read.port_number, in drm_dp_dump_sideband_msg_req_body()
647 req->u.i2c_read.num_transactions, in drm_dp_dump_sideband_msg_req_body()
648 req->u.i2c_read.read_i2c_device_id, in drm_dp_dump_sideband_msg_req_body()
649 req->u.i2c_read.num_bytes_read); in drm_dp_dump_sideband_msg_req_body()
652 for (i = 0; i < req->u.i2c_read.num_transactions; i++) { in drm_dp_dump_sideband_msg_req_body()
654 &req->u.i2c_read.transactions[i]; in drm_dp_dump_sideband_msg_req_body()
657 i, rtx->i2c_dev_id, rtx->num_bytes, in drm_dp_dump_sideband_msg_req_body()
658 rtx->no_stop_bit, rtx->i2c_transaction_delay, in drm_dp_dump_sideband_msg_req_body()
659 rtx->num_bytes, rtx->bytes); in drm_dp_dump_sideband_msg_req_body()
664 req->u.i2c_write.port_number, in drm_dp_dump_sideband_msg_req_body()
665 req->u.i2c_write.write_i2c_device_id, in drm_dp_dump_sideband_msg_req_body()
666 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes, in drm_dp_dump_sideband_msg_req_body()
667 req->u.i2c_write.bytes); in drm_dp_dump_sideband_msg_req_body()
672 req->u.enc_status.stream_id, in drm_dp_dump_sideband_msg_req_body()
673 (int)ARRAY_SIZE(req->u.enc_status.client_id), in drm_dp_dump_sideband_msg_req_body()
674 req->u.enc_status.client_id, req->u.enc_status.stream_event, in drm_dp_dump_sideband_msg_req_body()
675 req->u.enc_status.valid_stream_event, in drm_dp_dump_sideband_msg_req_body()
676 req->u.enc_status.stream_behavior, in drm_dp_dump_sideband_msg_req_body()
677 req->u.enc_status.valid_stream_behavior); in drm_dp_dump_sideband_msg_req_body()
696 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf, in drm_dp_mst_dump_sideband_msg_tx()
699 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno, in drm_dp_mst_dump_sideband_msg_tx()
700 drm_dp_mst_sideband_tx_state_str(txmsg->state), in drm_dp_mst_dump_sideband_msg_tx()
701 txmsg->path_msg, buf); in drm_dp_mst_dump_sideband_msg_tx()
736 u8 *buf = raw->msg; in drm_dp_encode_sideband_reply()
738 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); in drm_dp_encode_sideband_reply()
740 raw->cur_len = idx; in drm_dp_encode_sideband_reply()
748 * ignore out-of-order messages or messages that are part of a in drm_dp_sideband_msg_set_header()
751 if (!hdr->somt && !msg->have_somt) in drm_dp_sideband_msg_set_header()
755 msg->curchunk_idx = 0; in drm_dp_sideband_msg_set_header()
756 msg->curchunk_len = hdr->msg_len; in drm_dp_sideband_msg_set_header()
757 msg->curchunk_hdrlen = hdrlen; in drm_dp_sideband_msg_set_header()
759 /* we have already gotten an somt - don't bother parsing */ in drm_dp_sideband_msg_set_header()
760 if (hdr->somt && msg->have_somt) in drm_dp_sideband_msg_set_header()
763 if (hdr->somt) { in drm_dp_sideband_msg_set_header()
764 memcpy(&msg->initial_hdr, hdr, in drm_dp_sideband_msg_set_header()
766 msg->have_somt = true; in drm_dp_sideband_msg_set_header()
768 if (hdr->eomt) in drm_dp_sideband_msg_set_header()
769 msg->have_eomt = true; in drm_dp_sideband_msg_set_header()
780 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); in drm_dp_sideband_append_payload()
781 msg->curchunk_idx += replybuflen; in drm_dp_sideband_append_payload()
783 if (msg->curchunk_idx >= msg->curchunk_len) { in drm_dp_sideband_append_payload()
785 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); in drm_dp_sideband_append_payload()
786 if (crc4 != msg->chunk[msg->curchunk_len - 1]) in drm_dp_sideband_append_payload()
789 msg->chunk, msg->curchunk_len, false); in drm_dp_sideband_append_payload()
791 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); in drm_dp_sideband_append_payload()
792 msg->curlen += msg->curchunk_len - 1; in drm_dp_sideband_append_payload()
804 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_link_address()
806 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_link_address()
808 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
810 for (i = 0; i < repmsg->u.link_addr.nports; i++) { in drm_dp_sideband_parse_link_address()
811 if (raw->msg[idx] & 0x80) in drm_dp_sideband_parse_link_address()
812 repmsg->u.link_addr.ports[i].input_port = 1; in drm_dp_sideband_parse_link_address()
814 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; in drm_dp_sideband_parse_link_address()
815 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_link_address()
818 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
820 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; in drm_dp_sideband_parse_link_address()
821 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; in drm_dp_sideband_parse_link_address()
822 if (repmsg->u.link_addr.ports[i].input_port == 0) in drm_dp_sideband_parse_link_address()
823 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; in drm_dp_sideband_parse_link_address()
825 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
827 if (repmsg->u.link_addr.ports[i].input_port == 0) { in drm_dp_sideband_parse_link_address()
828 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); in drm_dp_sideband_parse_link_address()
830 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
832 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_link_address()
834 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
836 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_link_address()
837 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_link_address()
841 if (idx > raw->curlen) in drm_dp_sideband_parse_link_address()
847 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_link_address()
856 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_remote_dpcd_read()
858 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_read()
860 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; in drm_dp_sideband_parse_remote_dpcd_read()
862 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_read()
865 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); in drm_dp_sideband_parse_remote_dpcd_read()
868 DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_dpcd_read()
877 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; in drm_dp_sideband_parse_remote_dpcd_write()
879 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_dpcd_write()
883 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_dpcd_write()
892 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); in drm_dp_sideband_parse_remote_i2c_read_ack()
894 if (idx > raw->curlen) in drm_dp_sideband_parse_remote_i2c_read_ack()
896 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; in drm_dp_sideband_parse_remote_i2c_read_ack()
899 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); in drm_dp_sideband_parse_remote_i2c_read_ack()
902 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_remote_i2c_read_ack()
911 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_enum_path_resources_ack()
912 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; in drm_dp_sideband_parse_enum_path_resources_ack()
914 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
916 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_enum_path_resources_ack()
918 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
920 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_enum_path_resources_ack()
922 if (idx > raw->curlen) in drm_dp_sideband_parse_enum_path_resources_ack()
926 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_enum_path_resources_ack()
935 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_allocate_payload_ack()
937 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
939 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; in drm_dp_sideband_parse_allocate_payload_ack()
941 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
943 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); in drm_dp_sideband_parse_allocate_payload_ack()
945 if (idx > raw->curlen) in drm_dp_sideband_parse_allocate_payload_ack()
949 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_allocate_payload_ack()
958 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_query_payload_ack()
960 if (idx > raw->curlen) in drm_dp_sideband_parse_query_payload_ack()
962 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); in drm_dp_sideband_parse_query_payload_ack()
964 if (idx > raw->curlen) in drm_dp_sideband_parse_query_payload_ack()
968 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_query_payload_ack()
977 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf; in drm_dp_sideband_parse_power_updown_phy_ack()
979 if (idx > raw->curlen) { in drm_dp_sideband_parse_power_updown_phy_ack()
981 idx, raw->curlen); in drm_dp_sideband_parse_power_updown_phy_ack()
994 reply = &repmsg->u.enc_status; in drm_dp_sideband_parse_query_stream_enc_status()
996 reply->stream_id = raw->msg[3]; in drm_dp_sideband_parse_query_stream_enc_status()
998 reply->reply_signed = raw->msg[2] & BIT(0); in drm_dp_sideband_parse_query_stream_enc_status()
1008 reply->hdcp_1x_device_present = raw->msg[2] & BIT(4); in drm_dp_sideband_parse_query_stream_enc_status()
1009 reply->hdcp_2x_device_present = raw->msg[2] & BIT(3); in drm_dp_sideband_parse_query_stream_enc_status()
1011 reply->query_capable_device_present = raw->msg[2] & BIT(5); in drm_dp_sideband_parse_query_stream_enc_status()
1012 reply->legacy_device_present = raw->msg[2] & BIT(6); in drm_dp_sideband_parse_query_stream_enc_status()
1013 reply->unauthorizable_device_present = raw->msg[2] & BIT(7); in drm_dp_sideband_parse_query_stream_enc_status()
1015 reply->auth_completed = !!(raw->msg[1] & BIT(3)); in drm_dp_sideband_parse_query_stream_enc_status()
1016 reply->encryption_enabled = !!(raw->msg[1] & BIT(4)); in drm_dp_sideband_parse_query_stream_enc_status()
1017 reply->repeater_present = !!(raw->msg[1] & BIT(5)); in drm_dp_sideband_parse_query_stream_enc_status()
1018 reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6; in drm_dp_sideband_parse_query_stream_enc_status()
1028 msg->reply_type = (raw->msg[0] & 0x80) >> 7; in drm_dp_sideband_parse_reply()
1029 msg->req_type = (raw->msg[0] & 0x7f); in drm_dp_sideband_parse_reply()
1031 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_sideband_parse_reply()
1032 memcpy(msg->u.nak.guid, &raw->msg[1], 16); in drm_dp_sideband_parse_reply()
1033 msg->u.nak.reason = raw->msg[17]; in drm_dp_sideband_parse_reply()
1034 msg->u.nak.nak_data = raw->msg[18]; in drm_dp_sideband_parse_reply()
1038 switch (msg->req_type) { in drm_dp_sideband_parse_reply()
1063 drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n", in drm_dp_sideband_parse_reply()
1064 msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); in drm_dp_sideband_parse_reply()
1076 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; in drm_dp_sideband_parse_connection_status_notify()
1078 if (idx > raw->curlen) in drm_dp_sideband_parse_connection_status_notify()
1081 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_connection_status_notify()
1083 if (idx > raw->curlen) in drm_dp_sideband_parse_connection_status_notify()
1086 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1087 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1088 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1089 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; in drm_dp_sideband_parse_connection_status_notify()
1090 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); in drm_dp_sideband_parse_connection_status_notify()
1094 drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n", in drm_dp_sideband_parse_connection_status_notify()
1095 idx, raw->curlen); in drm_dp_sideband_parse_connection_status_notify()
1105 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; in drm_dp_sideband_parse_resource_status_notify()
1107 if (idx > raw->curlen) in drm_dp_sideband_parse_resource_status_notify()
1110 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); in drm_dp_sideband_parse_resource_status_notify()
1112 if (idx > raw->curlen) in drm_dp_sideband_parse_resource_status_notify()
1115 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); in drm_dp_sideband_parse_resource_status_notify()
1119 drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen); in drm_dp_sideband_parse_resource_status_notify()
1128 msg->req_type = (raw->msg[0] & 0x7f); in drm_dp_sideband_parse_req()
1130 switch (msg->req_type) { in drm_dp_sideband_parse_req()
1136 drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n", in drm_dp_sideband_parse_req()
1137 msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); in drm_dp_sideband_parse_req()
1169 msg->path_msg = true; in build_clear_payload_id_table()
1180 msg->path_msg = true; in build_enum_path_resources()
1201 msg->path_msg = true; in build_allocate_payload()
1216 msg->path_msg = true; in build_power_updown_phy()
1244 * All updates to txmsg->state are protected by mgr->qlock, and the two in check_txmsg_state()
1248 state = READ_ONCE(txmsg->state); in check_txmsg_state()
1256 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_wait_tx_reply()
1264 * poll-waiting for the MST reply interrupt if we didn't receive in drm_dp_mst_wait_tx_reply()
1268 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason in drm_dp_mst_wait_tx_reply()
1275 ret = wait_event_timeout(mgr->tx_waitq, in drm_dp_mst_wait_tx_reply()
1277 mgr->cbs->poll_hpd_irq ? in drm_dp_mst_wait_tx_reply()
1281 if (ret || !mgr->cbs->poll_hpd_irq || in drm_dp_mst_wait_tx_reply()
1285 mgr->cbs->poll_hpd_irq(mgr); in drm_dp_mst_wait_tx_reply()
1288 mutex_lock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
1290 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { in drm_dp_mst_wait_tx_reply()
1291 ret = -EIO; in drm_dp_mst_wait_tx_reply()
1295 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n", in drm_dp_mst_wait_tx_reply()
1296 txmsg, txmsg->state, txmsg->seqno); in drm_dp_mst_wait_tx_reply()
1299 ret = -EIO; in drm_dp_mst_wait_tx_reply()
1302 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || in drm_dp_mst_wait_tx_reply()
1303 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_wait_tx_reply()
1304 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in drm_dp_mst_wait_tx_reply()
1305 list_del(&txmsg->next); in drm_dp_mst_wait_tx_reply()
1308 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { in drm_dp_mst_wait_tx_reply()
1313 mutex_unlock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
1327 mstb->lct = lct; in drm_dp_add_mst_branch_device()
1329 memcpy(mstb->rad, rad, lct / 2); in drm_dp_add_mst_branch_device()
1330 INIT_LIST_HEAD(&mstb->ports); in drm_dp_add_mst_branch_device()
1331 kref_init(&mstb->topology_kref); in drm_dp_add_mst_branch_device()
1332 kref_init(&mstb->malloc_kref); in drm_dp_add_mst_branch_device()
1341 if (mstb->port_parent) in drm_dp_free_mst_branch_device()
1342 drm_dp_mst_put_port_malloc(mstb->port_parent); in drm_dp_free_mst_branch_device()
1350 * Topology refcount overview
1355 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1357 * Topology refcounts are not exposed to drivers, and are handled internally
1359 * in-memory topology state from being changed in the middle of critical
1362 * of the topology until its topology refcount reaches zero. Additionally,
1370 * drm_dp_mst_branch allocated even after all of its topology references have
1372 * branch's last known state before it was disconnected from the topology.
1380 * helpers. Exposing this API to drivers in a race-free manner would take more
1384 * Refcount relationships in a topology
1387 * Let's take a look at why the relationship between topology and malloc
1390 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1392 * An example of topology and malloc refs in a DP MST topology with two
1393 * active payloads. Topology refcount increments are indicated by solid
1399 * As you can see in the above figure, every branch increments the topology
1406 * topology would start to look like the figure below.
1408 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1413 * Whenever a port or branch device's topology refcount reaches zero, it will
1414 * decrement the topology refcounts of all its children, the malloc refcount
1416 * #4, this means they both have been disconnected from the topology and freed
1418 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1426 * connected to the topology. In this case, we would travel up the topology as
1429 * .. kernel-figure:: dp-mst/topology-figure-3.dot
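
The overview excerpted above boils down to: topology refs are internal and track membership in the live topology, while malloc refs are the driver-facing way to keep the backing allocation alive. A minimal sketch of the driver-side pattern, using the exported malloc-ref helpers shown below (struct and function names here are hypothetical):

    /* Sketch: a driver stashing a long-lived pointer to an MST port. */
    struct my_encoder_state {
            struct drm_dp_mst_port *port;
    };

    static void my_state_bind_port(struct my_encoder_state *s,
                                   struct drm_dp_mst_port *port)
    {
            s->port = port;
            drm_dp_mst_get_port_malloc(port); /* pin the allocation, not the topology */
    }

    static void my_state_release(struct my_encoder_state *s)
    {
            drm_dp_mst_put_port_malloc(s->port); /* may free the port */
            s->port = NULL;
    }
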
1436 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1449 kref_get(&mstb->malloc_kref); in drm_dp_mst_get_mstb_malloc()
1450 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref)); in drm_dp_mst_get_mstb_malloc()
1454 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1467 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1); in drm_dp_mst_put_mstb_malloc()
1468 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device); in drm_dp_mst_put_mstb_malloc()
1476 drm_dp_mst_put_mstb_malloc(port->parent); in drm_dp_free_mst_port()
1481 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1500 kref_get(&port->malloc_kref); in drm_dp_mst_get_port_malloc()
1501 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref)); in drm_dp_mst_get_port_malloc()
1506 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1518 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1); in drm_dp_mst_put_port_malloc()
1519 kref_put(&port->malloc_kref, drm_dp_free_mst_port); in drm_dp_mst_put_port_malloc()
1544 for (i = 0; i < history->len; i++) { in __topology_ref_save()
1545 if (history->entries[i].backtrace == backtrace) { in __topology_ref_save()
1546 entry = &history->entries[i]; in __topology_ref_save()
1554 int new_len = history->len + 1; in __topology_ref_save()
1556 new = krealloc(history->entries, sizeof(*new) * new_len, in __topology_ref_save()
1561 entry = &new[history->len]; in __topology_ref_save()
1562 history->len = new_len; in __topology_ref_save()
1563 history->entries = new; in __topology_ref_save()
1565 entry->backtrace = backtrace; in __topology_ref_save()
1566 entry->type = type; in __topology_ref_save()
1567 entry->count = 0; in __topology_ref_save()
1569 entry->count++; in __topology_ref_save()
1570 entry->ts_nsec = ktime_get_ns(); in __topology_ref_save()
1578 if (entry_a->ts_nsec > entry_b->ts_nsec) in topology_ref_history_cmp()
1580 else if (entry_a->ts_nsec < entry_b->ts_nsec) in topology_ref_history_cmp()
1581 return -1; in topology_ref_history_cmp()
1606 if (!history->len) in __dump_topology_ref_history()
1612 sort(history->entries, history->len, sizeof(*history->entries), in __dump_topology_ref_history()
1615 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", in __dump_topology_ref_history()
1618 for (i = 0; i < history->len; i++) { in __dump_topology_ref_history()
1620 &history->entries[i]; in __dump_topology_ref_history()
1621 u64 ts_nsec = entry->ts_nsec; in __dump_topology_ref_history()
1624 stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4); in __dump_topology_ref_history()
1627 entry->count, in __dump_topology_ref_history()
1628 topology_ref_type_to_str(entry->type), in __dump_topology_ref_history()
1633 kfree(history->entries); in __dump_topology_ref_history()
1641 __dump_topology_ref_history(&mstb->topology_ref_history, mstb, in drm_dp_mst_dump_mstb_topology_history()
1648 __dump_topology_ref_history(&port->topology_ref_history, port, in drm_dp_mst_dump_port_topology_history()
1656 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); in save_mstb_topology_ref()
1663 __topology_ref_save(port->mgr, &port->topology_ref_history, type); in save_port_topology_ref()
1669 mutex_lock(&mgr->topology_ref_history_lock); in topology_ref_history_lock()
1675 mutex_unlock(&mgr->topology_ref_history_lock); in topology_ref_history_unlock()
1696 list_for_each_entry(payload, &state->payloads, next) in drm_atomic_get_mst_payload_state()
1697 if (payload->port == port) in drm_atomic_get_mst_payload_state()
1708 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_destroy_mst_branch_device()
1712 INIT_LIST_HEAD(&mstb->destroy_next); in drm_dp_destroy_mst_branch_device()
1715 * This can get called under mgr->mutex, so we need to perform the in drm_dp_destroy_mst_branch_device()
1718 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_destroy_mst_branch_device()
1719 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); in drm_dp_destroy_mst_branch_device()
1720 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_destroy_mst_branch_device()
1721 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); in drm_dp_destroy_mst_branch_device()
1725 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1727 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1729 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1730 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1731 * reached 0). Holding a topology reference implies that a malloc reference
1732 * will be held to @mstb as long as the user holds the topology reference.
1735 * reference to @mstb. If you already have a topology reference to @mstb, you
1743 * * 1: A topology reference was grabbed successfully
1744 * * 0: @mstb is no longer in the topology, no reference was grabbed
1751 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_try_get_mstb()
1752 ret = kref_get_unless_zero(&mstb->topology_kref); in drm_dp_mst_topology_try_get_mstb()
1754 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); in drm_dp_mst_topology_try_get_mstb()
1758 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_try_get_mstb()
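
As the kerneldoc above requires, callers must check the return value before touching the branch; the internal pattern is a simple guarded section (sketch):

    if (drm_dp_mst_topology_try_get_mstb(mstb)) {
            /* mstb is still in the topology; safe to use it here */
            drm_dp_mst_topology_put_mstb(mstb);
    }
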
1764 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1766 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1770 * you are already guaranteed to have at least one active topology reference
1779 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_get_mstb()
1782 WARN_ON(kref_read(&mstb->topology_kref) == 0); in drm_dp_mst_topology_get_mstb()
1783 kref_get(&mstb->topology_kref); in drm_dp_mst_topology_get_mstb()
1784 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); in drm_dp_mst_topology_get_mstb()
1786 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_get_mstb()
1790 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1792 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1794 * Releases a topology reference from @mstb by decrementing
1804 topology_ref_history_lock(mstb->mgr); in drm_dp_mst_topology_put_mstb()
1806 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); in drm_dp_mst_topology_put_mstb()
1809 topology_ref_history_unlock(mstb->mgr); in drm_dp_mst_topology_put_mstb()
1810 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); in drm_dp_mst_topology_put_mstb()
1817 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_destroy_port()
1822 if (port->input) { in drm_dp_destroy_port()
1827 drm_edid_free(port->cached_edid); in drm_dp_destroy_port()
1833 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_destroy_port()
1834 list_add(&port->next, &mgr->destroy_port_list); in drm_dp_destroy_port()
1835 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_destroy_port()
1836 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work); in drm_dp_destroy_port()
1840 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1842 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1844 * Attempts to grab a topology reference to @port, if it hasn't yet been
1845 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1846 * 0). Holding a topology reference implies that a malloc reference will be
1847 * held to @port as long as the user holds the topology reference.
1850 * reference to @port. If you already have a topology reference to @port, you
1858 * * 1: A topology reference was grabbed successfully
1859 * * 0: @port is no longer in the topology, no reference was grabbed
1866 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_try_get_port()
1867 ret = kref_get_unless_zero(&port->topology_kref); in drm_dp_mst_topology_try_get_port()
1869 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); in drm_dp_mst_topology_try_get_port()
1873 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_try_get_port()
1878 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1879 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1883 * you are already guaranteed to have at least one active topology reference
1892 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_get_port()
1894 WARN_ON(kref_read(&port->topology_kref) == 0); in drm_dp_mst_topology_get_port()
1895 kref_get(&port->topology_kref); in drm_dp_mst_topology_get_port()
1896 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref)); in drm_dp_mst_topology_get_port()
1899 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_get_port()
1903 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1904 * @port: The &struct drm_dp_mst_port to release the topology reference from
1906 * Releases a topology reference from @port by decrementing
1915 topology_ref_history_lock(port->mgr); in drm_dp_mst_topology_put_port()
1917 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); in drm_dp_mst_topology_put_port()
1920 topology_ref_history_unlock(port->mgr); in drm_dp_mst_topology_put_port()
1921 kref_put(&port->topology_kref, drm_dp_destroy_port); in drm_dp_mst_topology_put_port()
1934 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_topology_get_mstb_validated_locked()
1935 if (port->mstb) { in drm_dp_mst_topology_get_mstb_validated_locked()
1937 port->mstb, to_find); in drm_dp_mst_topology_get_mstb_validated_locked()
1951 mutex_lock(&mgr->lock); in drm_dp_mst_topology_get_mstb_validated()
1952 if (mgr->mst_primary) { in drm_dp_mst_topology_get_mstb_validated()
1954 mgr->mst_primary, mstb); in drm_dp_mst_topology_get_mstb_validated()
1959 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_get_mstb_validated()
1969 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_topology_get_port_validated_locked()
1973 if (port->mstb) { in drm_dp_mst_topology_get_port_validated_locked()
1975 port->mstb, to_find); in drm_dp_mst_topology_get_port_validated_locked()
1989 mutex_lock(&mgr->lock); in drm_dp_mst_topology_get_port_validated()
1990 if (mgr->mst_primary) { in drm_dp_mst_topology_get_port_validated()
1992 mgr->mst_primary, port); in drm_dp_mst_topology_get_port_validated()
1997 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_get_port_validated()
2006 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_get_port()
2007 if (port->port_num == port_num) { in drm_dp_get_port()
2024 int parent_lct = port->parent->lct; in drm_dp_calculate_rad()
2026 int idx = (parent_lct - 1) / 2; in drm_dp_calculate_rad()
2029 memcpy(rad, port->parent->rad, idx + 1); in drm_dp_calculate_rad()
2034 rad[idx] |= port->port_num << shift; in drm_dp_calculate_rad()
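
Illustrative example (assuming the elided shift computation picks the low nibble for even parent LCTs, as the idx arithmetic above suggests): a child on port 8 of a parent at lct 2 with rad[0] = 0x10 (first hop via port 1) gets idx = 0, copies the parent byte, and ORs 8 into the low nibble, ending up with rad[0] = 0x18 at lct 3 - one 4-bit hop per nibble, high nibble first.
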
2058 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_port_set_pdt()
2063 if (port->pdt == new_pdt && port->mcs == new_mcs) in drm_dp_port_set_pdt()
2067 if (port->pdt != DP_PEER_DEVICE_NONE) { in drm_dp_port_set_pdt()
2068 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_port_set_pdt()
2070 * If the new PDT would also have an i2c bus, in drm_dp_port_set_pdt()
2075 port->pdt = new_pdt; in drm_dp_port_set_pdt()
2076 port->mcs = new_mcs; in drm_dp_port_set_pdt()
2080 /* remove i2c over sideband */ in drm_dp_port_set_pdt()
2083 mutex_lock(&mgr->lock); in drm_dp_port_set_pdt()
2084 drm_dp_mst_topology_put_mstb(port->mstb); in drm_dp_port_set_pdt()
2085 port->mstb = NULL; in drm_dp_port_set_pdt()
2086 mutex_unlock(&mgr->lock); in drm_dp_port_set_pdt()
2090 port->pdt = new_pdt; in drm_dp_port_set_pdt()
2091 port->mcs = new_mcs; in drm_dp_port_set_pdt()
2093 if (port->pdt != DP_PEER_DEVICE_NONE) { in drm_dp_port_set_pdt()
2094 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_port_set_pdt()
2095 /* add i2c over sideband */ in drm_dp_port_set_pdt()
2101 ret = -ENOMEM; in drm_dp_port_set_pdt()
2102 drm_err(mgr->dev, "Failed to create MSTB for port %p", port); in drm_dp_port_set_pdt()
2106 mutex_lock(&mgr->lock); in drm_dp_port_set_pdt()
2107 port->mstb = mstb; in drm_dp_port_set_pdt()
2108 mstb->mgr = port->mgr; in drm_dp_port_set_pdt()
2109 mstb->port_parent = port; in drm_dp_port_set_pdt()
2116 mutex_unlock(&mgr->lock); in drm_dp_port_set_pdt()
2125 port->pdt = DP_PEER_DEVICE_NONE; in drm_dp_port_set_pdt()
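
The add/remove i2c comments above call into helpers this search does not match (drm_dp_mst_register_i2c_bus() and its unregister counterpart in the same file). In shape, they hang an i2c_algorithm off port->aux.ddc whose transfer hook marshals i2c_msg transactions into the DP_REMOTE_I2C_READ/WRITE sideband requests encoded earlier, then register the adapter. A hedged sketch of that shape (function names and the functionality mask are simplified assumptions):

    #include <linux/i2c.h>

    /* Sketch: translate an i2c_msg array into sideband transactions. */
    static int dp_mst_i2c_xfer(struct i2c_adapter *adapter,
                               struct i2c_msg *msgs, int num)
    {
            /* ...build a DP_REMOTE_I2C_READ/WRITE request from msgs[],
             * queue it, and wait for the reply, as in the code above... */
            return num;
    }

    static u32 dp_mst_i2c_functionality(struct i2c_adapter *adapter)
    {
            return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
    }

    static const struct i2c_algorithm dp_mst_i2c_algo = {
            .master_xfer    = dp_mst_i2c_xfer,
            .functionality  = dp_mst_i2c_functionality,
    };

    /* Registration then amounts to setting port->aux.ddc.algo to the
     * algo above and calling i2c_add_adapter(&port->aux.ddc). */
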
2130 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2148 return drm_dp_send_dpcd_read(port->mgr, port, in drm_dp_mst_dpcd_read()
2153 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2171 return drm_dp_send_dpcd_write(port->mgr, port, in drm_dp_mst_dpcd_write()
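
These two wrappers are what let a remote port's AUX channel behave like a local one: drm_dp_dpcd_read()/write() dispatch here when aux->is_remote is set (the flag is set during port init further down). Caller-side sketch, assuming an already-validated port:

    u8 dpcd_rev;
    ssize_t ret;

    /* routed through drm_dp_mst_dpcd_read() because port->aux.is_remote */
    ret = drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, &dpcd_rev, 1);
    if (ret == 1)
            drm_dbg_kms(port->mgr->dev, "remote DPCD rev %#x\n", dpcd_rev);
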
2179 memcpy(mstb->guid, guid, 16); in drm_dp_check_mstb_guid()
2181 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { in drm_dp_check_mstb_guid()
2182 if (mstb->port_parent) { in drm_dp_check_mstb_guid()
2183 ret = drm_dp_send_dpcd_write(mstb->mgr, in drm_dp_check_mstb_guid()
2184 mstb->port_parent, in drm_dp_check_mstb_guid()
2185 DP_GUID, 16, mstb->guid); in drm_dp_check_mstb_guid()
2187 ret = drm_dp_dpcd_write(mstb->mgr->aux, in drm_dp_check_mstb_guid()
2188 DP_GUID, mstb->guid, 16); in drm_dp_check_mstb_guid()
2193 return -EPROTO; in drm_dp_check_mstb_guid()
2206 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); in build_mst_prop_path()
2207 for (i = 0; i < (mstb->lct - 1); i++) { in build_mst_prop_path()
2209 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; in build_mst_prop_path()
2211 snprintf(temp, sizeof(temp), "-%d", port_num); in build_mst_prop_path()
2214 snprintf(temp, sizeof(temp), "-%d", pnum); in build_mst_prop_path()
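
Illustrative example (values invented): with conn_base_id 10 and a branch at lct 3 whose RAD reads port 1 then port 8, the property path built here for a sink on port 2 comes out as "mst:10-1-8-2".
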
2219 * drm_dp_mst_connector_late_register() - Late MST connector registration
2232 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n", in drm_dp_mst_connector_late_register()
2233 port->aux.name, connector->kdev->kobj.name); in drm_dp_mst_connector_late_register()
2235 port->aux.dev = connector->kdev; in drm_dp_mst_connector_late_register()
2236 return drm_dp_aux_register_devnode(&port->aux); in drm_dp_mst_connector_late_register()
2241 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2252 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n", in drm_dp_mst_connector_early_unregister()
2253 port->aux.name, connector->kdev->kobj.name); in drm_dp_mst_connector_early_unregister()
2254 drm_dp_aux_unregister_devnode(&port->aux); in drm_dp_mst_connector_early_unregister()
2262 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_port_add_connector()
2266 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); in drm_dp_mst_port_add_connector()
2267 port->connector = mgr->cbs->add_connector(mgr, port, proppath); in drm_dp_mst_port_add_connector()
2268 if (!port->connector) { in drm_dp_mst_port_add_connector()
2269 ret = -ENOMEM; in drm_dp_mst_port_add_connector()
2273 if (port->pdt != DP_PEER_DEVICE_NONE && in drm_dp_mst_port_add_connector()
2274 drm_dp_mst_is_end_device(port->pdt, port->mcs) && in drm_dp_mst_port_add_connector()
2275 port->port_num >= DP_MST_LOGICAL_PORT_0) in drm_dp_mst_port_add_connector()
2276 port->cached_edid = drm_edid_read_ddc(port->connector, in drm_dp_mst_port_add_connector()
2277 &port->aux.ddc); in drm_dp_mst_port_add_connector()
2279 drm_connector_register(port->connector); in drm_dp_mst_port_add_connector()
2283 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret); in drm_dp_mst_port_add_connector()
2287 * Drop a topology reference, and unlink the port from the in-memory topology
2294 mutex_lock(&mgr->lock); in drm_dp_mst_topology_unlink_port()
2295 port->parent->num_ports--; in drm_dp_mst_topology_unlink_port()
2296 list_del(&port->next); in drm_dp_mst_topology_unlink_port()
2297 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_unlink_port()
2311 kref_init(&port->topology_kref); in drm_dp_mst_add_port()
2312 kref_init(&port->malloc_kref); in drm_dp_mst_add_port()
2313 port->parent = mstb; in drm_dp_mst_add_port()
2314 port->port_num = port_number; in drm_dp_mst_add_port()
2315 port->mgr = mgr; in drm_dp_mst_add_port()
2316 port->aux.name = "DPMST"; in drm_dp_mst_add_port()
2317 port->aux.dev = dev->dev; in drm_dp_mst_add_port()
2318 port->aux.is_remote = true; in drm_dp_mst_add_port()
2321 port->aux.drm_dev = dev; in drm_dp_mst_add_port()
2322 drm_dp_remote_aux_init(&port->aux); in drm_dp_mst_add_port()
2338 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_handle_link_address_port()
2345 port = drm_dp_get_port(mstb, port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2348 port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2350 return -ENOMEM; in drm_dp_mst_handle_link_address_port()
2353 } else if (!port->input && port_msg->input_port && port->connector) { in drm_dp_mst_handle_link_address_port()
2354 /* Since port->connector can't be changed here, we create a in drm_dp_mst_handle_link_address_port()
2360 port_msg->port_number); in drm_dp_mst_handle_link_address_port()
2362 return -ENOMEM; in drm_dp_mst_handle_link_address_port()
2365 } else if (port->input && !port_msg->input_port) { in drm_dp_mst_handle_link_address_port()
2367 } else if (port->connector) { in drm_dp_mst_handle_link_address_port()
2371 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_mst_handle_link_address_port()
2373 old_ddps = port->ddps; in drm_dp_mst_handle_link_address_port()
2374 changed = port->ddps != port_msg->ddps || in drm_dp_mst_handle_link_address_port()
2375 (port->ddps && in drm_dp_mst_handle_link_address_port()
2376 (port->ldps != port_msg->legacy_device_plug_status || in drm_dp_mst_handle_link_address_port()
2377 port->dpcd_rev != port_msg->dpcd_revision || in drm_dp_mst_handle_link_address_port()
2378 port->mcs != port_msg->mcs || in drm_dp_mst_handle_link_address_port()
2379 port->pdt != port_msg->peer_device_type || in drm_dp_mst_handle_link_address_port()
2380 port->num_sdp_stream_sinks != in drm_dp_mst_handle_link_address_port()
2381 port_msg->num_sdp_stream_sinks)); in drm_dp_mst_handle_link_address_port()
2384 port->input = port_msg->input_port; in drm_dp_mst_handle_link_address_port()
2385 if (!port->input) in drm_dp_mst_handle_link_address_port()
2386 new_pdt = port_msg->peer_device_type; in drm_dp_mst_handle_link_address_port()
2387 new_mcs = port_msg->mcs; in drm_dp_mst_handle_link_address_port()
2388 port->ddps = port_msg->ddps; in drm_dp_mst_handle_link_address_port()
2389 port->ldps = port_msg->legacy_device_plug_status; in drm_dp_mst_handle_link_address_port()
2390 port->dpcd_rev = port_msg->dpcd_revision; in drm_dp_mst_handle_link_address_port()
2391 port->num_sdp_streams = port_msg->num_sdp_streams; in drm_dp_mst_handle_link_address_port()
2392 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; in drm_dp_mst_handle_link_address_port()
2394 /* manage mstb port lists with mgr lock - take a reference in drm_dp_mst_handle_link_address_port()
2397 mutex_lock(&mgr->lock); in drm_dp_mst_handle_link_address_port()
2399 list_add(&port->next, &mstb->ports); in drm_dp_mst_handle_link_address_port()
2400 mstb->num_ports++; in drm_dp_mst_handle_link_address_port()
2401 mutex_unlock(&mgr->lock); in drm_dp_mst_handle_link_address_port()
2405 * Reprobe PBN caps on both hotplug, and when re-probing the link in drm_dp_mst_handle_link_address_port()
2408 if (old_ddps != port->ddps || !created) { in drm_dp_mst_handle_link_address_port()
2409 if (port->ddps && !port->input) { in drm_dp_mst_handle_link_address_port()
2415 port->full_pbn = 0; in drm_dp_mst_handle_link_address_port()
2432 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && in drm_dp_mst_handle_link_address_port()
2433 port->mcs) in drm_dp_mst_handle_link_address_port()
2436 if (port->connector) in drm_dp_mst_handle_link_address_port()
2437 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_link_address_port()
2438 else if (!port->input) in drm_dp_mst_handle_link_address_port()
2441 if (send_link_addr && port->mstb) { in drm_dp_mst_handle_link_address_port()
2442 ret = drm_dp_send_link_address(mgr, port->mstb); in drm_dp_mst_handle_link_address_port()
2455 if (port->connector) in drm_dp_mst_handle_link_address_port()
2456 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_link_address_port()
2466 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_handle_conn_stat()
2473 port = drm_dp_get_port(mstb, conn_stat->port_number); in drm_dp_mst_handle_conn_stat()
2477 if (port->connector) { in drm_dp_mst_handle_conn_stat()
2478 if (!port->input && conn_stat->input_port) { in drm_dp_mst_handle_conn_stat()
2485 mstb->link_address_sent = false; in drm_dp_mst_handle_conn_stat()
2491 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_mst_handle_conn_stat()
2492 } else if (port->input && !conn_stat->input_port) { in drm_dp_mst_handle_conn_stat()
2495 mstb->link_address_sent = false; in drm_dp_mst_handle_conn_stat()
2499 old_ddps = port->ddps; in drm_dp_mst_handle_conn_stat()
2500 port->input = conn_stat->input_port; in drm_dp_mst_handle_conn_stat()
2501 port->ldps = conn_stat->legacy_device_plug_status; in drm_dp_mst_handle_conn_stat()
2502 port->ddps = conn_stat->displayport_device_plug_status; in drm_dp_mst_handle_conn_stat()
2504 if (old_ddps != port->ddps) { in drm_dp_mst_handle_conn_stat()
2505 if (port->ddps && !port->input) in drm_dp_mst_handle_conn_stat()
2508 port->full_pbn = 0; in drm_dp_mst_handle_conn_stat()
2511 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; in drm_dp_mst_handle_conn_stat()
2512 new_mcs = conn_stat->message_capability_status; in drm_dp_mst_handle_conn_stat()
2517 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret); in drm_dp_mst_handle_conn_stat()
2521 if (port->connector) in drm_dp_mst_handle_conn_stat()
2522 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_handle_conn_stat()
2539 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device()
2540 mstb = mgr->mst_primary; in drm_dp_get_mst_branch_device()
2545 for (i = 0; i < lct - 1; i++) { in drm_dp_get_mst_branch_device()
2549 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_get_mst_branch_device()
2550 if (port->port_num == port_num) { in drm_dp_get_mst_branch_device()
2551 mstb = port->mstb; in drm_dp_get_mst_branch_device()
2553 drm_err(mgr->dev, in drm_dp_get_mst_branch_device()
2567 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device()
2581 if (memcmp(mstb->guid, guid, 16) == 0) in get_mst_branch_device_by_guid_helper()
2585 list_for_each_entry(port, &mstb->ports, next) { in get_mst_branch_device_by_guid_helper()
2586 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); in get_mst_branch_device_by_guid_helper()
2603 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
2605 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); in drm_dp_get_mst_branch_device_by_guid()
2612 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
2623 if (!mstb->link_address_sent) { in drm_dp_check_and_send_link_address()
2631 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_check_and_send_link_address()
2632 if (port->input || !port->ddps || !port->mstb) in drm_dp_check_and_send_link_address()
2635 ret = drm_dp_check_and_send_link_address(mgr, port->mstb); in drm_dp_check_and_send_link_address()
2649 struct drm_device *dev = mgr->dev; in drm_dp_mst_link_probe_work()
2654 mutex_lock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2656 mutex_lock(&mgr->lock); in drm_dp_mst_link_probe_work()
2657 clear_payload_id_table = !mgr->payload_id_table_cleared; in drm_dp_mst_link_probe_work()
2658 mgr->payload_id_table_cleared = true; in drm_dp_mst_link_probe_work()
2660 mstb = mgr->mst_primary; in drm_dp_mst_link_probe_work()
2666 mutex_unlock(&mgr->lock); in drm_dp_mst_link_probe_work()
2668 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2676 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C in drm_dp_mst_link_probe_work()
2688 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_link_probe_work()
2733 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); in drm_dp_send_sideband_msg()
2735 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, in drm_dp_send_sideband_msg()
2739 if (ret == -EIO && retries < 5) { in drm_dp_send_sideband_msg()
2743 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); in drm_dp_send_sideband_msg()
2745 return -EIO; in drm_dp_send_sideband_msg()
2748 total -= tosend; in drm_dp_send_sideband_msg()
2756 struct drm_dp_mst_branch *mstb = txmsg->dst; in set_hdr_from_dst_qlock()
2759 req_type = txmsg->msg[0] & 0x7f; in set_hdr_from_dst_qlock()
2763 hdr->broadcast = 1; in set_hdr_from_dst_qlock()
2765 hdr->broadcast = 0; in set_hdr_from_dst_qlock()
2766 hdr->path_msg = txmsg->path_msg; in set_hdr_from_dst_qlock()
2767 if (hdr->broadcast) { in set_hdr_from_dst_qlock()
2768 hdr->lct = 1; in set_hdr_from_dst_qlock()
2769 hdr->lcr = 6; in set_hdr_from_dst_qlock()
2771 hdr->lct = mstb->lct; in set_hdr_from_dst_qlock()
2772 hdr->lcr = mstb->lct - 1; in set_hdr_from_dst_qlock()
2775 memcpy(hdr->rad, mstb->rad, hdr->lct / 2); in set_hdr_from_dst_qlock()
2791 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in process_single_tx_qlock()
2796 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) in process_single_tx_qlock()
2797 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; in process_single_tx_qlock()
2805 len = txmsg->cur_len - txmsg->cur_offset; in process_single_tx_qlock()
2807 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ in process_single_tx_qlock()
2808 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); in process_single_tx_qlock()
2811 if (len == txmsg->cur_len) in process_single_tx_qlock()
2819 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); in process_single_tx_qlock()
2835 txmsg->cur_offset += tosend; in process_single_tx_qlock()
2836 if (txmsg->cur_offset == txmsg->cur_len) { in process_single_tx_qlock()
2837 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; in process_single_tx_qlock()
2848 WARN_ON(!mutex_is_locked(&mgr->qlock)); in process_single_down_tx_qlock()
2851 if (list_empty(&mgr->tx_msg_downq)) in process_single_down_tx_qlock()
2854 txmsg = list_first_entry(&mgr->tx_msg_downq, in process_single_down_tx_qlock()
2858 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret); in process_single_down_tx_qlock()
2859 list_del(&txmsg->next); in process_single_down_tx_qlock()
2860 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in process_single_down_tx_qlock()
2861 wake_up_all(&mgr->tx_waitq); in process_single_down_tx_qlock()
2868 mutex_lock(&mgr->qlock); in drm_dp_queue_down_tx()
2869 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); in drm_dp_queue_down_tx()
2877 if (list_is_singular(&mgr->tx_msg_downq)) in drm_dp_queue_down_tx()
2879 mutex_unlock(&mgr->qlock); in drm_dp_queue_down_tx()
2889 for (i = 0; i < reply->nports; i++) { in drm_dp_dump_link_address()
2890 port_reply = &reply->ports[i]; in drm_dp_dump_link_address()
2891 drm_dbg_kms(mgr->dev, in drm_dp_dump_link_address()
2894 port_reply->input_port, in drm_dp_dump_link_address()
2895 port_reply->peer_device_type, in drm_dp_dump_link_address()
2896 port_reply->port_number, in drm_dp_dump_link_address()
2897 port_reply->dpcd_revision, in drm_dp_dump_link_address()
2898 port_reply->mcs, in drm_dp_dump_link_address()
2899 port_reply->ddps, in drm_dp_dump_link_address()
2900 port_reply->legacy_device_plug_status, in drm_dp_dump_link_address()
2901 port_reply->num_sdp_streams, in drm_dp_dump_link_address()
2902 port_reply->num_sdp_stream_sinks); in drm_dp_dump_link_address()
2917 return -ENOMEM; in drm_dp_send_link_address()
2919 txmsg->dst = mstb; in drm_dp_send_link_address()
2922 mstb->link_address_sent = true; in drm_dp_send_link_address()
2928 drm_err(mgr->dev, "Sending link address failed with %d\n", ret); in drm_dp_send_link_address()
2931 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_link_address()
2932 drm_err(mgr->dev, "link address NAK received\n"); in drm_dp_send_link_address()
2933 ret = -EIO; in drm_dp_send_link_address()
2937 reply = &txmsg->reply.u.link_addr; in drm_dp_send_link_address()
2938 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); in drm_dp_send_link_address()
2941 ret = drm_dp_check_mstb_guid(mstb, reply->guid); in drm_dp_send_link_address()
2945 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf)); in drm_dp_send_link_address()
2946 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret); in drm_dp_send_link_address()
2950 for (i = 0; i < reply->nports; i++) { in drm_dp_send_link_address()
2951 port_mask |= BIT(reply->ports[i].port_number); in drm_dp_send_link_address()
2952 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, in drm_dp_send_link_address()
2953 &reply->ports[i]); in drm_dp_send_link_address()
2960 /* Prune any ports that are currently a part of mstb in our in-memory in drm_dp_send_link_address()
2961 * topology, but were not seen in this link address. Usually this in drm_dp_send_link_address()
2962 * means that they were removed while the topology was out of sync, in drm_dp_send_link_address()
2965 mutex_lock(&mgr->lock); in drm_dp_send_link_address()
2966 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { in drm_dp_send_link_address()
2967 if (port_mask & BIT(port->port_num)) in drm_dp_send_link_address()
2970 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n", in drm_dp_send_link_address()
2971 port->port_num); in drm_dp_send_link_address()
2972 list_del(&port->next); in drm_dp_send_link_address()
2976 mutex_unlock(&mgr->lock); in drm_dp_send_link_address()
2980 mstb->link_address_sent = false; in drm_dp_send_link_address()
2996 txmsg->dst = mstb; in drm_dp_send_clear_payload_id_table()
3002 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_clear_payload_id_table()
3003 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n"); in drm_dp_send_clear_payload_id_table()
3019 return -ENOMEM; in drm_dp_send_enum_path_resources()
3021 txmsg->dst = mstb; in drm_dp_send_enum_path_resources()
3022 build_enum_path_resources(txmsg, port->port_num); in drm_dp_send_enum_path_resources()
3029 path_res = &txmsg->reply.u.path_resources; in drm_dp_send_enum_path_resources()
3031 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_enum_path_resources()
3032 drm_dbg_kms(mgr->dev, "enum path resources nak received\n"); in drm_dp_send_enum_path_resources()
3034 if (port->port_num != path_res->port_number) in drm_dp_send_enum_path_resources()
3037 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n", in drm_dp_send_enum_path_resources()
3038 path_res->port_number, in drm_dp_send_enum_path_resources()
3039 path_res->full_payload_bw_number, in drm_dp_send_enum_path_resources()
3040 path_res->avail_payload_bw_number); in drm_dp_send_enum_path_resources()
3046 if (port->full_pbn != path_res->full_payload_bw_number || in drm_dp_send_enum_path_resources()
3047 port->fec_capable != path_res->fec_capable) in drm_dp_send_enum_path_resources()
3050 port->full_pbn = path_res->full_payload_bw_number; in drm_dp_send_enum_path_resources()
3051 port->fec_capable = path_res->fec_capable; in drm_dp_send_enum_path_resources()
3061 if (!mstb->port_parent) in drm_dp_get_last_connected_port_to_mstb()
3064 if (mstb->port_parent->mstb != mstb) in drm_dp_get_last_connected_port_to_mstb()
3065 return mstb->port_parent; in drm_dp_get_last_connected_port_to_mstb()
3067 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); in drm_dp_get_last_connected_port_to_mstb()
3071 * Searches upwards in the topology starting from mstb to try to find the
3073 * topology. This can be used in order to perform operations like releasing
3086 mutex_lock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
3087 if (!mgr->mst_primary) in drm_dp_get_last_connected_port_and_mstb()
3095 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { in drm_dp_get_last_connected_port_and_mstb()
3096 rmstb = found_port->parent; in drm_dp_get_last_connected_port_and_mstb()
3097 *port_num = found_port->port_num; in drm_dp_get_last_connected_port_and_mstb()
3100 mstb = found_port->parent; in drm_dp_get_last_connected_port_and_mstb()
3104 mutex_unlock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
3119 port_num = port->port_num; in drm_dp_payload_send_msg()
3120 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_payload_send_msg()
3123 port->parent, in drm_dp_payload_send_msg()
3127 return -EINVAL; in drm_dp_payload_send_msg()
3132 ret = -ENOMEM; in drm_dp_payload_send_msg()
3136 for (i = 0; i < port->num_sdp_streams; i++) in drm_dp_payload_send_msg()
3139 txmsg->dst = mstb; in drm_dp_payload_send_msg()
3142 pbn, port->num_sdp_streams, sinks); in drm_dp_payload_send_msg()
3149 * mstb could also be removed from the topology. In the future, this in drm_dp_payload_send_msg()
3152 * timeout if the topology is still connected to the system. in drm_dp_payload_send_msg()
3156 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_payload_send_msg()
3157 ret = -EINVAL; in drm_dp_payload_send_msg()
3175 return -EINVAL; in drm_dp_send_power_updown_phy()
3180 return -ENOMEM; in drm_dp_send_power_updown_phy()
3183 txmsg->dst = port->parent; in drm_dp_send_power_updown_phy()
3184 build_power_updown_phy(txmsg, port->port_num, power_up); in drm_dp_send_power_updown_phy()
3187 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); in drm_dp_send_power_updown_phy()
3189 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_power_updown_phy()
3190 ret = -EINVAL; in drm_dp_send_power_updown_phy()
3213 return -ENOMEM; in drm_dp_send_query_stream_enc_status()
3217 ret = -EINVAL; in drm_dp_send_query_stream_enc_status()
3223 drm_modeset_lock(&mgr->base.lock, NULL); in drm_dp_send_query_stream_enc_status()
3224 state = to_drm_dp_mst_topology_state(mgr->base.state); in drm_dp_send_query_stream_enc_status()
3232 txmsg->dst = mgr->mst_primary; in drm_dp_send_query_stream_enc_status()
3234 build_query_stream_enc_status(txmsg, payload->vcpi, nonce); in drm_dp_send_query_stream_enc_status()
3238 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg); in drm_dp_send_query_stream_enc_status()
3241 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_send_query_stream_enc_status()
3242 drm_dbg_kms(mgr->dev, "query encryption status nak received\n"); in drm_dp_send_query_stream_enc_status()
3243 ret = -ENXIO; in drm_dp_send_query_stream_enc_status()
3248 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status)); in drm_dp_send_query_stream_enc_status()
3251 drm_modeset_unlock(&mgr->base.lock); in drm_dp_send_query_stream_enc_status()
3262 return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, in drm_dp_create_payload_at_dfp()
3263 payload->time_slots); in drm_dp_create_payload_at_dfp()
3270 struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); in drm_dp_create_payload_to_remote()
3273 return -EIO; in drm_dp_create_payload_to_remote()
3275 ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn); in drm_dp_create_payload_to_remote()
3284 drm_dbg_kms(mgr->dev, "\n"); in drm_dp_destroy_payload_at_remote_and_dfp()
3287 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) { in drm_dp_destroy_payload_at_remote_and_dfp()
3288 drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0); in drm_dp_destroy_payload_at_remote_and_dfp()
3289 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP; in drm_dp_destroy_payload_at_remote_and_dfp()
3292 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) in drm_dp_destroy_payload_at_remote_and_dfp()
3293 drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0); in drm_dp_destroy_payload_at_remote_and_dfp()
3297 * drm_dp_add_payload_part1() - Execute payload update part 1
3315 if (mgr->payload_count == 0) in drm_dp_add_payload_part1()
3316 mgr->next_start_slot = mst_state->start_slot; in drm_dp_add_payload_part1()
3318 payload->vc_start_slot = mgr->next_start_slot; in drm_dp_add_payload_part1()
3320 mgr->payload_count++; in drm_dp_add_payload_part1()
3321 mgr->next_start_slot += payload->time_slots; in drm_dp_add_payload_part1()
3323 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL; in drm_dp_add_payload_part1()
3326 port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); in drm_dp_add_payload_part1()
3328 drm_dbg_kms(mgr->dev, in drm_dp_add_payload_part1()
3329 "VCPI %d for port %p not in topology, not creating a payload to remote\n", in drm_dp_add_payload_part1()
3330 payload->vcpi, payload->port); in drm_dp_add_payload_part1()
3331 return -EIO; in drm_dp_add_payload_part1()
3336 drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n", in drm_dp_add_payload_part1()
3337 payload->port, ret); in drm_dp_add_payload_part1()
3341 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP; in drm_dp_add_payload_part1()
3351 * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel
3367 mutex_lock(&mgr->lock); in drm_dp_remove_payload_part1()
3368 send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary); in drm_dp_remove_payload_part1()
3369 mutex_unlock(&mgr->lock); in drm_dp_remove_payload_part1()
3374 drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n", in drm_dp_remove_payload_part1()
3375 payload->vcpi); in drm_dp_remove_payload_part1()
3377 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL; in drm_dp_remove_payload_part1()
3382 * drm_dp_remove_payload_part2() - Remove an MST payload locally
3401 list_for_each_entry(pos, &mst_state->payloads, next) { in drm_dp_remove_payload_part2()
3402 if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot) in drm_dp_remove_payload_part2()
3403 pos->vc_start_slot -= old_payload->time_slots; in drm_dp_remove_payload_part2()
3405 new_payload->vc_start_slot = -1; in drm_dp_remove_payload_part2()
3407 mgr->payload_count--; in drm_dp_remove_payload_part2()
3408 mgr->next_start_slot -= old_payload->time_slots; in drm_dp_remove_payload_part2()
3410 if (new_payload->delete) in drm_dp_remove_payload_part2()
3411 drm_dp_mst_put_port_malloc(new_payload->port); in drm_dp_remove_payload_part2()
3413 new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE; in drm_dp_remove_payload_part2()
3417 * drm_dp_add_payload_part2() - Execute payload update part 2
3434 if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) { in drm_dp_add_payload_part2()
3435 drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", in drm_dp_add_payload_part2()
3436 payload->port->connector->name); in drm_dp_add_payload_part2()
3437 return -EIO; in drm_dp_add_payload_part2()
3443 drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n", in drm_dp_add_payload_part2()
3444 payload->port, ret); in drm_dp_add_payload_part2()
3446 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE; in drm_dp_add_payload_part2()
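A hedged commit-tail ordering sketch for the two-part payload API listed above: part 1 writes the local payload table before the stream starts, the ACT poll confirms the branch applied it, then part 2 allocates bandwidth at the remote end. Error handling is trimmed; mgr, state, mst_state and payload come from the surrounding driver commit code.

	drm_dp_add_payload_part1(mgr, mst_state, payload);

	/* ... driver enables the transcoder / starts the stream here ... */

	drm_dp_check_act_status(mgr);
	drm_dp_add_payload_part2(mgr, state, payload);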
3460 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_send_dpcd_read()
3462 return -EINVAL; in drm_dp_send_dpcd_read()
3466 ret = -ENOMEM; in drm_dp_send_dpcd_read()
3470 build_dpcd_read(txmsg, port->port_num, offset, size); in drm_dp_send_dpcd_read()
3471 txmsg->dst = port->parent; in drm_dp_send_dpcd_read()
3479 if (txmsg->reply.reply_type == 1) { in drm_dp_send_dpcd_read()
3480 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", in drm_dp_send_dpcd_read()
3481 mstb, port->port_num, offset, size); in drm_dp_send_dpcd_read()
3482 ret = -EIO; in drm_dp_send_dpcd_read()
3486 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { in drm_dp_send_dpcd_read()
3487 ret = -EPROTO; in drm_dp_send_dpcd_read()
3491 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, in drm_dp_send_dpcd_read()
3493 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); in drm_dp_send_dpcd_read()
3511 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); in drm_dp_send_dpcd_write()
3513 return -EINVAL; in drm_dp_send_dpcd_write()
3517 ret = -ENOMEM; in drm_dp_send_dpcd_write()
3521 build_dpcd_write(txmsg, port->port_num, offset, size, bytes); in drm_dp_send_dpcd_write()
3522 txmsg->dst = mstb; in drm_dp_send_dpcd_write()
3528 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) in drm_dp_send_dpcd_write()
3529 ret = -EIO; in drm_dp_send_dpcd_write()
3558 return -ENOMEM; in drm_dp_send_up_ack_reply()
3560 txmsg->dst = mstb; in drm_dp_send_up_ack_reply()
3563 mutex_lock(&mgr->qlock); in drm_dp_send_up_ack_reply()
3566 mutex_unlock(&mgr->qlock); in drm_dp_send_up_ack_reply()
3573 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3593 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", in drm_dp_get_vc_payload_bw()
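A hedged fragment deriving the per-time-slot PBN divider that feeds mst_state->pbn_div; the HBR2 (5.4 Gbps) x4 link parameters are illustrative, and drm_dp_bw_code_to_link_rate() supplies the rate in the kHz units this helper expects:

	mst_state->pbn_div = drm_dp_get_vc_payload_bw(mgr,
				drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4),
				4 /* lane count */);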
3606 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
3628 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3630 * @mst_state: true to enable MST on this connector - false to disable.
3640 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
3641 if (mst_state == mgr->mst_state) in drm_dp_mst_topology_mgr_set_mst()
3644 mgr->mst_state = mst_state; in drm_dp_mst_topology_mgr_set_mst()
3647 WARN_ON(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
3650 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd); in drm_dp_mst_topology_mgr_set_mst()
3652 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n", in drm_dp_mst_topology_mgr_set_mst()
3653 mgr->aux->name, ret); in drm_dp_mst_topology_mgr_set_mst()
3660 ret = -ENOMEM; in drm_dp_mst_topology_mgr_set_mst()
3663 mstb->mgr = mgr; in drm_dp_mst_topology_mgr_set_mst()
3666 mgr->mst_primary = mstb; in drm_dp_mst_topology_mgr_set_mst()
3667 drm_dp_mst_topology_get_mstb(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
3669 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_set_mst()
3679 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_topology_mgr_set_mst()
3684 mstb = mgr->mst_primary; in drm_dp_mst_topology_mgr_set_mst()
3685 mgr->mst_primary = NULL; in drm_dp_mst_topology_mgr_set_mst()
3687 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); in drm_dp_mst_topology_mgr_set_mst()
3689 mgr->payload_id_table_cleared = false; in drm_dp_mst_topology_mgr_set_mst()
3691 memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv)); in drm_dp_mst_topology_mgr_set_mst()
3692 memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv)); in drm_dp_mst_topology_mgr_set_mst()
3696 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
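A minimal hedged hotplug wrapper around the helper above (the function name is illustrative):

	static void example_handle_long_hpd(struct drm_dp_mst_topology_mgr *mgr,
					    bool mst_detected)
	{
		int ret = drm_dp_mst_topology_mgr_set_mst(mgr, mst_detected);

		if (ret)
			drm_dbg_kms(mgr->dev, "failed to set MST state: %d\n", ret);
	}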
3709 /* The link address will need to be re-sent on resume */ in drm_dp_mst_topology_mgr_invalidate_mstb()
3710 mstb->link_address_sent = false; in drm_dp_mst_topology_mgr_invalidate_mstb()
3712 list_for_each_entry(port, &mstb->ports, next) in drm_dp_mst_topology_mgr_invalidate_mstb()
3713 if (port->mstb) in drm_dp_mst_topology_mgr_invalidate_mstb()
3714 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); in drm_dp_mst_topology_mgr_invalidate_mstb()
3718 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3726 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3727 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_suspend()
3729 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3730 flush_work(&mgr->up_req_work); in drm_dp_mst_topology_mgr_suspend()
3731 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_suspend()
3732 flush_work(&mgr->delayed_destroy_work); in drm_dp_mst_topology_mgr_suspend()
3734 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3735 if (mgr->mst_state && mgr->mst_primary) in drm_dp_mst_topology_mgr_suspend()
3736 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); in drm_dp_mst_topology_mgr_suspend()
3737 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
3742 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3744 * @sync: whether or not to perform topology reprobing synchronously
3749 * If the device fails this check, -1 is returned and the driver should do a full MST reprobe, in case we were undocked.
3758 * Returns: -1 if the MST topology was removed while we were suspended, 0 otherwise.
3767 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3768 if (!mgr->mst_primary) in drm_dp_mst_topology_mgr_resume()
3771 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) { in drm_dp_mst_topology_mgr_resume()
3772 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3776 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_resume()
3781 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3786 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); in drm_dp_mst_topology_mgr_resume()
3788 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3792 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid); in drm_dp_mst_topology_mgr_resume()
3794 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); in drm_dp_mst_topology_mgr_resume()
3799 * For the final step of resuming the topology, we need to bring the in drm_dp_mst_topology_mgr_resume()
3800 * state of our in-memory topology back into sync with reality. So, in drm_dp_mst_topology_mgr_resume()
3803 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_topology_mgr_resume()
3804 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3807 drm_dbg_kms(mgr->dev, in drm_dp_mst_topology_mgr_resume()
3808 "Waiting for link probe work to finish re-syncing topology...\n"); in drm_dp_mst_topology_mgr_resume()
3809 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_resume()
3815 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
3816 return -1; in drm_dp_mst_topology_mgr_resume()
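A hedged resume-path sketch matching the contract above (example_pm_resume is an illustrative name):

	static int example_pm_resume(struct drm_dp_mst_topology_mgr *mgr)
	{
		/* sync=true also waits for the queued reprobe work to finish */
		if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
			/* topology vanished while suspended: the driver
			 * should tear down and re-detect from scratch */
			return -ENODEV;
		}
		return 0;
	}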
3831 up ? &mgr->up_req_recv : &mgr->down_rep_recv; in drm_dp_get_one_sb_msg()
3838 len = min(mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
3839 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); in drm_dp_get_one_sb_msg()
3841 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); in drm_dp_get_one_sb_msg()
3849 drm_dbg_kms(mgr->dev, "ERROR: failed header\n"); in drm_dp_get_one_sb_msg()
3857 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct); in drm_dp_get_one_sb_msg()
3863 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]); in drm_dp_get_one_sb_msg()
3867 replylen = min(msg->curchunk_len, (u8)(len - hdrlen)); in drm_dp_get_one_sb_msg()
3870 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]); in drm_dp_get_one_sb_msg()
3874 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len; in drm_dp_get_one_sb_msg()
3877 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
3878 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, in drm_dp_get_one_sb_msg()
3881 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", in drm_dp_get_one_sb_msg()
3888 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n"); in drm_dp_get_one_sb_msg()
3893 replylen -= len; in drm_dp_get_one_sb_msg()
3902 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; in drm_dp_mst_handle_down_rep()
3907 /* Multi-packet message transmission, don't clear the reply */ in drm_dp_mst_handle_down_rep()
3908 if (!msg->have_eomt) in drm_dp_mst_handle_down_rep()
3912 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
3913 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, in drm_dp_mst_handle_down_rep()
3915 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
3918 if (!txmsg || txmsg->dst != mstb) { in drm_dp_mst_handle_down_rep()
3921 hdr = &msg->initial_hdr; in drm_dp_mst_handle_down_rep()
3922 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", in drm_dp_mst_handle_down_rep()
3923 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); in drm_dp_mst_handle_down_rep()
3927 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); in drm_dp_mst_handle_down_rep()
3929 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { in drm_dp_mst_handle_down_rep()
3930 drm_dbg_kms(mgr->dev, in drm_dp_mst_handle_down_rep()
3932 txmsg->reply.req_type, in drm_dp_mst_handle_down_rep()
3933 drm_dp_mst_req_type_str(txmsg->reply.req_type), in drm_dp_mst_handle_down_rep()
3934 txmsg->reply.u.nak.reason, in drm_dp_mst_handle_down_rep()
3935 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), in drm_dp_mst_handle_down_rep()
3936 txmsg->reply.u.nak.nak_data); in drm_dp_mst_handle_down_rep()
3942 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
3943 txmsg->state = DRM_DP_SIDEBAND_TX_RX; in drm_dp_mst_handle_down_rep()
3944 list_del(&txmsg->next); in drm_dp_mst_handle_down_rep()
3945 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
3947 wake_up_all(&mgr->tx_waitq); in drm_dp_mst_handle_down_rep()
3965 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; in drm_dp_mst_process_up_req()
3966 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; in drm_dp_mst_process_up_req()
3969 if (hdr->broadcast) { in drm_dp_mst_process_up_req()
3972 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) in drm_dp_mst_process_up_req()
3973 guid = msg->u.conn_stat.guid; in drm_dp_mst_process_up_req()
3974 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) in drm_dp_mst_process_up_req()
3975 guid = msg->u.resource_stat.guid; in drm_dp_mst_process_up_req()
3980 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); in drm_dp_mst_process_up_req()
3984 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct); in drm_dp_mst_process_up_req()
3989 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { in drm_dp_mst_process_up_req()
3990 dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); in drm_dp_mst_process_up_req()
3997 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_process_up_req()
4009 mutex_lock(&mgr->probe_lock); in drm_dp_mst_up_req_work()
4011 mutex_lock(&mgr->up_req_lock); in drm_dp_mst_up_req_work()
4012 up_req = list_first_entry_or_null(&mgr->up_req_list, in drm_dp_mst_up_req_work()
4016 list_del(&up_req->next); in drm_dp_mst_up_req_work()
4017 mutex_unlock(&mgr->up_req_lock); in drm_dp_mst_up_req_work()
4025 mutex_unlock(&mgr->probe_lock); in drm_dp_mst_up_req_work()
4028 drm_kms_helper_hotplug_event(mgr->dev); in drm_dp_mst_up_req_work()
4038 if (!mgr->up_req_recv.have_eomt) in drm_dp_mst_handle_up_req()
4043 return -ENOMEM; in drm_dp_mst_handle_up_req()
4045 INIT_LIST_HEAD(&up_req->next); in drm_dp_mst_handle_up_req()
4047 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg); in drm_dp_mst_handle_up_req()
4049 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && in drm_dp_mst_handle_up_req()
4050 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4051 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", in drm_dp_mst_handle_up_req()
4052 up_req->msg.req_type); in drm_dp_mst_handle_up_req()
4057 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, in drm_dp_mst_handle_up_req()
4060 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4062 &up_req->msg.u.conn_stat; in drm_dp_mst_handle_up_req()
4064 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", in drm_dp_mst_handle_up_req()
4065 conn_stat->port_number, in drm_dp_mst_handle_up_req()
4066 conn_stat->legacy_device_plug_status, in drm_dp_mst_handle_up_req()
4067 conn_stat->displayport_device_plug_status, in drm_dp_mst_handle_up_req()
4068 conn_stat->message_capability_status, in drm_dp_mst_handle_up_req()
4069 conn_stat->input_port, in drm_dp_mst_handle_up_req()
4070 conn_stat->peer_device_type); in drm_dp_mst_handle_up_req()
4071 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { in drm_dp_mst_handle_up_req()
4073 &up_req->msg.u.resource_stat; in drm_dp_mst_handle_up_req()
4075 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n", in drm_dp_mst_handle_up_req()
4076 res_stat->port_number, in drm_dp_mst_handle_up_req()
4077 res_stat->available_pbn); in drm_dp_mst_handle_up_req()
4080 up_req->hdr = mgr->up_req_recv.initial_hdr; in drm_dp_mst_handle_up_req()
4081 mutex_lock(&mgr->up_req_lock); in drm_dp_mst_handle_up_req()
4082 list_add_tail(&up_req->next, &mgr->up_req_list); in drm_dp_mst_handle_up_req()
4083 mutex_unlock(&mgr->up_req_lock); in drm_dp_mst_handle_up_req()
4084 queue_work(system_long_wq, &mgr->up_req_work); in drm_dp_mst_handle_up_req()
4087 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_up_req()
4092 * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
4100 * topology manager will process the sideband messages received
4120 if (sc != mgr->sink_count) { in drm_dp_mst_hpd_irq_handle_event()
4121 mgr->sink_count = sc; in drm_dp_mst_hpd_irq_handle_event()
4142 * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
4155 mutex_lock(&mgr->qlock); in drm_dp_mst_hpd_irq_send_new_request()
4156 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, in drm_dp_mst_hpd_irq_send_new_request()
4160 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_hpd_irq_send_new_request()
4161 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) in drm_dp_mst_hpd_irq_send_new_request()
4163 mutex_unlock(&mgr->qlock); in drm_dp_mst_hpd_irq_send_new_request()
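A hedged ESI interrupt sketch wiring the two helpers above together: read the event status block, let the manager service it, ack only the serviced bits, then kick any deferred down request. The register choices follow the standard DP 1.2+ ESI layout; error handling is trimmed.

	static void example_mst_esi_irq(struct drm_dp_mst_topology_mgr *mgr)
	{
		u8 esi[4] = {}, ack[4] = {};
		bool handled = false;

		if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
			return;

		drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);

		if (handled) {
			/* ack[1] mirrors DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
			drm_dp_dpcd_writeb(mgr->aux,
					   DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
					   ack[1]);
			drm_dp_mst_hpd_irq_send_new_request(mgr);
		}
	}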
4170 * drm_dp_mst_detect_port() - get connection status for an MST port
4191 ret = drm_modeset_lock(&mgr->base.lock, ctx); in drm_dp_mst_detect_port()
4197 if (!port->ddps) in drm_dp_mst_detect_port()
4200 switch (port->pdt) { in drm_dp_mst_detect_port()
4204 if (!port->mcs) in drm_dp_mst_detect_port()
4210 /* for logical ports - cache the EDID */ in drm_dp_mst_detect_port()
4211 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid) in drm_dp_mst_detect_port()
4212 port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc); in drm_dp_mst_detect_port()
4215 if (port->ldps) in drm_dp_mst_detect_port()
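A hedged ->detect_ctx hook built on the helper above; example_connector and to_example_connector() stand in for a driver's own connector wrapper:

	static int example_detect_ctx(struct drm_connector *connector,
				      struct drm_modeset_acquire_ctx *ctx,
				      bool force)
	{
		struct example_connector *c = to_example_connector(connector);

		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
	}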
4226 * drm_dp_mst_edid_read() - get EDID for an MST port
4246 if (port->cached_edid) in drm_dp_mst_edid_read()
4247 drm_edid = drm_edid_dup(port->cached_edid); in drm_dp_mst_edid_read()
4249 drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc); in drm_dp_mst_edid_read()
4258 * drm_dp_mst_get_edid() - get EDID for an MST port
4287 * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
4289 * @mgr: MST topology manager for the port
4297 * atomic state is added whenever the state of payloads in the topology changes.
4327 conn_state = drm_atomic_get_new_connector_state(state, port->connector); in drm_dp_atomic_find_time_slots()
4328 topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc); in drm_dp_atomic_find_time_slots()
4333 prev_slots = payload->time_slots; in drm_dp_atomic_find_time_slots()
4334 prev_bw = payload->pbn; in drm_dp_atomic_find_time_slots()
4341 if (drm_WARN_ON(mgr->dev, payload->delete)) { in drm_dp_atomic_find_time_slots()
4342 drm_err(mgr->dev, in drm_dp_atomic_find_time_slots()
4345 return -EINVAL; in drm_dp_atomic_find_time_slots()
4349 req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full); in drm_dp_atomic_find_time_slots()
4351 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", in drm_dp_atomic_find_time_slots()
4352 port->connector->base.id, port->connector->name, in drm_dp_atomic_find_time_slots()
4354 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", in drm_dp_atomic_find_time_slots()
4355 port->connector->base.id, port->connector->name, in drm_dp_atomic_find_time_slots()
4362 return -ENOMEM; in drm_dp_atomic_find_time_slots()
4365 payload->port = port; in drm_dp_atomic_find_time_slots()
4366 payload->vc_start_slot = -1; in drm_dp_atomic_find_time_slots()
4367 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE; in drm_dp_atomic_find_time_slots()
4368 list_add(&payload->next, &topology_state->payloads); in drm_dp_atomic_find_time_slots()
4370 payload->time_slots = req_slots; in drm_dp_atomic_find_time_slots()
4371 payload->pbn = pbn; in drm_dp_atomic_find_time_slots()
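A hedged atomic_check fragment using the allocator above, assuming this kernel's drm_dp_calc_pbn_mode() convention of bpp in 1/16th-bpp units:

	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp << 4);
	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;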
4378 * drm_dp_atomic_release_time_slots() - Release allocated time slots
4380 * @mgr: MST topology manager for the port
4389 * topology.
4413 old_conn_state = drm_atomic_get_old_connector_state(state, port->connector); in drm_dp_atomic_release_time_slots()
4414 if (!old_conn_state->crtc) in drm_dp_atomic_release_time_slots()
4418 new_conn_state = drm_atomic_get_new_connector_state(state, port->connector); in drm_dp_atomic_release_time_slots()
4419 if (new_conn_state->crtc) { in drm_dp_atomic_release_time_slots()
4421 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); in drm_dp_atomic_release_time_slots()
4427 if (!crtc_state->mode_changed && !crtc_state->connectors_changed) in drm_dp_atomic_release_time_slots()
4435 topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc); in drm_dp_atomic_release_time_slots()
4441 drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n", in drm_dp_atomic_release_time_slots()
4442 port, &topology_state->base); in drm_dp_atomic_release_time_slots()
4443 return -EINVAL; in drm_dp_atomic_release_time_slots()
4446 if (new_conn_state->crtc) in drm_dp_atomic_release_time_slots()
4449 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots); in drm_dp_atomic_release_time_slots()
4450 if (!payload->delete) { in drm_dp_atomic_release_time_slots()
4451 payload->pbn = 0; in drm_dp_atomic_release_time_slots()
4452 payload->delete = true; in drm_dp_atomic_release_time_slots()
4453 topology_state->payload_mask &= ~BIT(payload->vcpi - 1); in drm_dp_atomic_release_time_slots()
4461 * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
4465 * currently assigned to an MST topology. Drivers must call this hook from their
4480 if (!mst_state->pending_crtc_mask) in drm_dp_mst_atomic_setup_commit()
4483 num_commit_deps = hweight32(mst_state->pending_crtc_mask); in drm_dp_mst_atomic_setup_commit()
4484 mst_state->commit_deps = kmalloc_array(num_commit_deps, in drm_dp_mst_atomic_setup_commit()
4485 sizeof(*mst_state->commit_deps), GFP_KERNEL); in drm_dp_mst_atomic_setup_commit()
4486 if (!mst_state->commit_deps) in drm_dp_mst_atomic_setup_commit()
4487 return -ENOMEM; in drm_dp_mst_atomic_setup_commit()
4488 mst_state->num_commit_deps = num_commit_deps; in drm_dp_mst_atomic_setup_commit()
4492 if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) { in drm_dp_mst_atomic_setup_commit()
4493 mst_state->commit_deps[commit_idx++] = in drm_dp_mst_atomic_setup_commit()
4494 drm_crtc_commit_get(crtc_state->commit); in drm_dp_mst_atomic_setup_commit()
4504 * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
4509 * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
4510 * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
4512 * the modeset objects in these commits share are an MST topology.
4516 * determined at commit-time) from the previous state.
4529 for (j = 0; j < old_mst_state->num_commit_deps; j++) { in drm_dp_mst_atomic_wait_for_dependencies()
4530 ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]); in drm_dp_mst_atomic_wait_for_dependencies()
4532 drm_err(state->dev, "Failed to wait for %s: %d\n", in drm_dp_mst_atomic_wait_for_dependencies()
4533 old_mst_state->commit_deps[j]->crtc->name, ret); in drm_dp_mst_atomic_wait_for_dependencies()
4539 list_for_each_entry(old_payload, &old_mst_state->payloads, next) { in drm_dp_mst_atomic_wait_for_dependencies()
4540 if (old_payload->delete) in drm_dp_mst_atomic_wait_for_dependencies()
4544 old_payload->port); in drm_dp_mst_atomic_wait_for_dependencies()
4545 new_payload->vc_start_slot = old_payload->vc_start_slot; in drm_dp_mst_atomic_wait_for_dependencies()
4546 new_payload->payload_allocation_status = in drm_dp_mst_atomic_wait_for_dependencies()
4547 old_payload->payload_allocation_status; in drm_dp_mst_atomic_wait_for_dependencies()
4554 * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
4557 * @mgr: The MST topology manager for the &drm_connector
4560 * serialize non-blocking commits happening on the real DP connector of an MST topology switching
4561 * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
4562 * MST topology will never share the same &drm_encoder.
4565 * state to determine if it is about to have a modeset - and then pulling in the MST topology state
4578 struct drm_atomic_state *state = new_conn_state->state; in drm_dp_mst_root_conn_atomic_check()
4580 drm_atomic_get_old_connector_state(state, new_conn_state->connector); in drm_dp_mst_root_conn_atomic_check()
4584 if (new_conn_state->crtc) { in drm_dp_mst_root_conn_atomic_check()
4585 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4591 mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4595 if (old_conn_state->crtc) { in drm_dp_mst_root_conn_atomic_check()
4596 crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4604 mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc); in drm_dp_mst_root_conn_atomic_check()
4613 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
4620 mst_state->total_avail_slots = 64; in drm_dp_mst_update_slots()
4621 mst_state->start_slot = 0; in drm_dp_mst_update_slots()
4623 mst_state->total_avail_slots = 63; in drm_dp_mst_update_slots()
4624 mst_state->start_slot = 1; in drm_dp_mst_update_slots()
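A hedged fragment selecting between the two layouts above from the sink's channel-coding capability; reading DP_MAIN_LINK_CHANNEL_CODING_CAP is one plausible source of the encoding value:

	u8 cap = 0;

	drm_dp_dpcd_readb(mgr->aux, DP_MAIN_LINK_CHANNEL_CODING_CAP, &cap);
	drm_dp_mst_update_slots(mst_state, (cap & DP_CAP_ANSI_128B132B) ?
				DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B);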
4640 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, in drm_dp_dpcd_write_payload()
4647 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); in drm_dp_dpcd_write_payload()
4649 drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret); in drm_dp_dpcd_write_payload()
4654 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); in drm_dp_dpcd_write_payload()
4656 drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret); in drm_dp_dpcd_write_payload()
4666 drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n", in drm_dp_dpcd_write_payload()
4668 ret = -EINVAL; in drm_dp_dpcd_write_payload()
4689 * drm_dp_check_act_status() - Polls for ACT handled status.
4693 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
4710 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, in drm_dp_check_act_status()
4714 drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n", in drm_dp_check_act_status()
4716 return -EINVAL; in drm_dp_check_act_status()
4719 * Failure here isn't unexpected - the hub may have in drm_dp_check_act_status()
4722 drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status); in drm_dp_check_act_status()
4731 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
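A hedged worked example for the calculation above: a 1080p60 stream (148.5 MHz pixel clock) at 24 bpp, again assuming the 1/16th-bpp argument convention; the result lands around 530 PBN once the 1.006 overhead margin is folded in.

	int pbn = drm_dp_calc_pbn_mode(148500 /* kHz */, 24 << 4);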
4768 queue_work(system_long_wq, &mgr->tx_work); in drm_dp_mst_kick_tx()
4797 int tabs = mstb->lct; in drm_dp_mst_dump_mstb()
4805 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports); in drm_dp_mst_dump_mstb()
4806 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_dump_mstb()
4807 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n", in drm_dp_mst_dump_mstb()
4809 port->port_num, in drm_dp_mst_dump_mstb()
4811 port->input ? "input" : "output", in drm_dp_mst_dump_mstb()
4812 pdt_to_string(port->pdt), in drm_dp_mst_dump_mstb()
4813 port->ddps, in drm_dp_mst_dump_mstb()
4814 port->ldps, in drm_dp_mst_dump_mstb()
4815 port->num_sdp_streams, in drm_dp_mst_dump_mstb()
4816 port->num_sdp_stream_sinks, in drm_dp_mst_dump_mstb()
4817 port->fec_capable ? "true" : "false", in drm_dp_mst_dump_mstb()
4818 port->connector); in drm_dp_mst_dump_mstb()
4819 if (port->mstb) in drm_dp_mst_dump_mstb()
4820 drm_dp_mst_dump_mstb(m, port->mstb); in drm_dp_mst_dump_mstb()
4832 if (drm_dp_dpcd_read(mgr->aux, in dump_dp_payload_table()
4846 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); in fetch_monitor_name()
4852 * drm_dp_mst_dump_topology() - dump topology to seq file.
4854 * @mgr: manager to dump current topology for.
4856 * helper to dump MST topology to a seq file for debugfs.
4872 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
4873 if (mgr->mst_primary) in drm_dp_mst_dump_topology()
4874 drm_dp_mst_dump_mstb(m, mgr->mst_primary); in drm_dp_mst_dump_topology()
4877 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
4879 ret = drm_modeset_lock_single_interruptible(&mgr->base.lock); in drm_dp_mst_dump_topology()
4883 state = to_drm_dp_mst_topology_state(mgr->base.state); in drm_dp_mst_dump_topology()
4886 state->payload_mask, mgr->max_payloads, state->start_slot, in drm_dp_mst_dump_topology()
4887 dfixed_trunc(state->pbn_div)); in drm_dp_mst_dump_topology()
4890 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_dump_topology()
4891 list_for_each_entry(payload, &state->payloads, next) { in drm_dp_mst_dump_topology()
4894 if (payload->vcpi != i || payload->delete) in drm_dp_mst_dump_topology()
4897 fetch_monitor_name(mgr, payload->port, name, sizeof(name)); in drm_dp_mst_dump_topology()
4898 seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n", in drm_dp_mst_dump_topology()
4900 payload->port->port_num, in drm_dp_mst_dump_topology()
4901 payload->vcpi, in drm_dp_mst_dump_topology()
4902 payload->vc_start_slot, in drm_dp_mst_dump_topology()
4903 payload->vc_start_slot + payload->time_slots - 1, in drm_dp_mst_dump_topology()
4904 payload->pbn, in drm_dp_mst_dump_topology()
4905 payload->dsc_enabled ? "Y" : "N", in drm_dp_mst_dump_topology()
4906 status[payload->payload_allocation_status], in drm_dp_mst_dump_topology()
4912 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
4913 if (mgr->mst_primary) { in drm_dp_mst_dump_topology()
4917 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) { in drm_dp_mst_dump_topology()
4923 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); in drm_dp_mst_dump_topology()
4930 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); in drm_dp_mst_dump_topology()
4938 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); in drm_dp_mst_dump_topology()
4954 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
4955 drm_modeset_unlock(&mgr->base.lock); in drm_dp_mst_dump_topology()
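A hedged debugfs hookup for the dumper above (the show-callback wiring is illustrative):

	static int example_mst_topology_show(struct seq_file *m, void *data)
	{
		struct drm_dp_mst_topology_mgr *mgr = m->private;

		drm_dp_mst_dump_topology(m, mgr);
		return 0;
	}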
4963 mutex_lock(&mgr->qlock); in drm_dp_tx_work()
4964 if (!list_empty(&mgr->tx_msg_downq)) in drm_dp_tx_work()
4966 mutex_unlock(&mgr->qlock); in drm_dp_tx_work()
4972 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); in drm_dp_delayed_destroy_port()
4974 if (port->connector) { in drm_dp_delayed_destroy_port()
4975 drm_connector_unregister(port->connector); in drm_dp_delayed_destroy_port()
4976 drm_connector_put(port->connector); in drm_dp_delayed_destroy_port()
4985 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_delayed_destroy_mstb()
4990 mutex_lock(&mgr->lock); in drm_dp_delayed_destroy_mstb()
4991 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) { in drm_dp_delayed_destroy_mstb()
4992 list_del(&port->next); in drm_dp_delayed_destroy_mstb()
4995 mutex_unlock(&mgr->lock); in drm_dp_delayed_destroy_mstb()
4998 mutex_lock(&mstb->mgr->qlock); in drm_dp_delayed_destroy_mstb()
4999 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) { in drm_dp_delayed_destroy_mstb()
5000 if (txmsg->dst != mstb) in drm_dp_delayed_destroy_mstb()
5003 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in drm_dp_delayed_destroy_mstb()
5004 list_del(&txmsg->next); in drm_dp_delayed_destroy_mstb()
5007 mutex_unlock(&mstb->mgr->qlock); in drm_dp_delayed_destroy_mstb()
5010 wake_up_all(&mstb->mgr->tx_waitq); in drm_dp_delayed_destroy_mstb()
5024 * connector lock before destroying the mstb/port, to avoid AB->BA in drm_dp_delayed_destroy_work()
5033 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5034 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, in drm_dp_delayed_destroy_work()
5038 list_del(&mstb->destroy_next); in drm_dp_delayed_destroy_work()
5039 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5051 mutex_lock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5052 port = list_first_entry_or_null(&mgr->destroy_port_list, in drm_dp_delayed_destroy_work()
5056 list_del(&port->next); in drm_dp_delayed_destroy_work()
5057 mutex_unlock(&mgr->delayed_destroy_lock); in drm_dp_delayed_destroy_work()
5069 drm_kms_helper_hotplug_event(mgr->dev); in drm_dp_delayed_destroy_work()
5076 to_dp_mst_topology_state(obj->state); in drm_dp_mst_duplicate_state()
5083 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); in drm_dp_mst_duplicate_state()
5085 INIT_LIST_HEAD(&state->payloads); in drm_dp_mst_duplicate_state()
5086 state->commit_deps = NULL; in drm_dp_mst_duplicate_state()
5087 state->num_commit_deps = 0; in drm_dp_mst_duplicate_state()
5088 state->pending_crtc_mask = 0; in drm_dp_mst_duplicate_state()
5090 list_for_each_entry(pos, &old_state->payloads, next) { in drm_dp_mst_duplicate_state()
5092 if (pos->delete) in drm_dp_mst_duplicate_state()
5099 drm_dp_mst_get_port_malloc(payload->port); in drm_dp_mst_duplicate_state()
5100 list_add(&payload->next, &state->payloads); in drm_dp_mst_duplicate_state()
5103 return &state->base; in drm_dp_mst_duplicate_state()
5106 list_for_each_entry_safe(pos, payload, &state->payloads, next) { in drm_dp_mst_duplicate_state()
5107 drm_dp_mst_put_port_malloc(pos->port); in drm_dp_mst_duplicate_state()
5123 list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) { in drm_dp_mst_destroy_state()
5125 if (!pos->delete) in drm_dp_mst_destroy_state()
5126 drm_dp_mst_put_port_malloc(pos->port); in drm_dp_mst_destroy_state()
5130 for (i = 0; i < mst_state->num_commit_deps; i++) in drm_dp_mst_destroy_state()
5131 drm_crtc_commit_put(mst_state->commit_deps[i]); in drm_dp_mst_destroy_state()
5133 kfree(mst_state->commit_deps); in drm_dp_mst_destroy_state()
5140 while (port->parent) { in drm_dp_mst_port_downstream_of_branch()
5141 if (port->parent == branch) in drm_dp_mst_port_downstream_of_branch()
5144 if (port->parent->port_parent) in drm_dp_mst_port_downstream_of_branch()
5145 port = port->parent->port_parent; in drm_dp_mst_port_downstream_of_branch()
5157 if (!mgr->mst_primary) in drm_dp_mst_port_downstream_of_parent_locked()
5160 port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, in drm_dp_mst_port_downstream_of_parent_locked()
5168 parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, in drm_dp_mst_port_downstream_of_parent_locked()
5173 if (!parent->mstb) in drm_dp_mst_port_downstream_of_parent_locked()
5176 return drm_dp_mst_port_downstream_of_branch(port, parent->mstb); in drm_dp_mst_port_downstream_of_parent_locked()
5180 * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
5181 * @mgr: MST topology manager
5186 * %NULL - denoting the root port - the function returns %true if @port is in
5187 * @mgr's topology.
5196 mutex_lock(&mgr->lock); in drm_dp_mst_port_downstream_of_parent()
5198 mutex_unlock(&mgr->lock); in drm_dp_mst_port_downstream_of_parent()
5222 list_for_each_entry(payload, &state->payloads, next) { in drm_dp_mst_atomic_check_mstb_bw_limit()
5223 if (!payload->pbn || in drm_dp_mst_atomic_check_mstb_bw_limit()
5224 !drm_dp_mst_port_downstream_of_branch(payload->port, mstb)) in drm_dp_mst_atomic_check_mstb_bw_limit()
5233 if (mstb->port_parent) in drm_dp_mst_atomic_check_mstb_bw_limit()
5234 drm_dbg_atomic(mstb->mgr->dev, in drm_dp_mst_atomic_check_mstb_bw_limit()
5236 mstb->port_parent->parent, mstb->port_parent, mstb); in drm_dp_mst_atomic_check_mstb_bw_limit()
5238 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); in drm_dp_mst_atomic_check_mstb_bw_limit()
5240 list_for_each_entry(port, &mstb->ports, next) { in drm_dp_mst_atomic_check_mstb_bw_limit()
5259 if (port->pdt == DP_PEER_DEVICE_NONE) in drm_dp_mst_atomic_check_port_bw_limit()
5262 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) { in drm_dp_mst_atomic_check_port_bw_limit()
5271 if (!port->full_pbn) { in drm_dp_mst_atomic_check_port_bw_limit()
5272 drm_dbg_atomic(port->mgr->dev, in drm_dp_mst_atomic_check_port_bw_limit()
5274 port->parent, port); in drm_dp_mst_atomic_check_port_bw_limit()
5276 return -EINVAL; in drm_dp_mst_atomic_check_port_bw_limit()
5279 pbn_used = payload->pbn; in drm_dp_mst_atomic_check_port_bw_limit()
5281 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, in drm_dp_mst_atomic_check_port_bw_limit()
5288 if (pbn_used > port->full_pbn) { in drm_dp_mst_atomic_check_port_bw_limit()
5289 drm_dbg_atomic(port->mgr->dev, in drm_dp_mst_atomic_check_port_bw_limit()
5291 port->parent, port, pbn_used, port->full_pbn); in drm_dp_mst_atomic_check_port_bw_limit()
5293 return -ENOSPC; in drm_dp_mst_atomic_check_port_bw_limit()
5296 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n", in drm_dp_mst_atomic_check_port_bw_limit()
5297 port->parent, port, pbn_used, port->full_pbn); in drm_dp_mst_atomic_check_port_bw_limit()
5307 int avail_slots = mst_state->total_avail_slots, payload_count = 0; in drm_dp_mst_atomic_check_payload_alloc_limits()
5309 list_for_each_entry(payload, &mst_state->payloads, next) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5310 /* Releasing payloads is always OK - even if the port is gone */ in drm_dp_mst_atomic_check_payload_alloc_limits()
5311 if (payload->delete) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5312 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5313 payload->port); in drm_dp_mst_atomic_check_payload_alloc_limits()
5317 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5318 payload->port, payload->time_slots); in drm_dp_mst_atomic_check_payload_alloc_limits()
5320 avail_slots -= payload->time_slots; in drm_dp_mst_atomic_check_payload_alloc_limits()
5322 drm_dbg_atomic(mgr->dev, in drm_dp_mst_atomic_check_payload_alloc_limits()
5324 payload->port, mst_state, avail_slots + payload->time_slots); in drm_dp_mst_atomic_check_payload_alloc_limits()
5325 return -ENOSPC; in drm_dp_mst_atomic_check_payload_alloc_limits()
5328 if (++payload_count > mgr->max_payloads) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5329 drm_dbg_atomic(mgr->dev, in drm_dp_mst_atomic_check_payload_alloc_limits()
5331 mgr, mst_state, mgr->max_payloads); in drm_dp_mst_atomic_check_payload_alloc_limits()
5332 return -EINVAL; in drm_dp_mst_atomic_check_payload_alloc_limits()
5336 if (!payload->vcpi) { in drm_dp_mst_atomic_check_payload_alloc_limits()
5337 payload->vcpi = ffz(mst_state->payload_mask) + 1; in drm_dp_mst_atomic_check_payload_alloc_limits()
5338 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5339 payload->port, payload->vcpi); in drm_dp_mst_atomic_check_payload_alloc_limits()
5340 mst_state->payload_mask |= BIT(payload->vcpi - 1); in drm_dp_mst_atomic_check_payload_alloc_limits()
5345 mst_state->pbn_div.full = dfixed_const(0); in drm_dp_mst_atomic_check_payload_alloc_limits()
5347 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n", in drm_dp_mst_atomic_check_payload_alloc_limits()
5348 mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots, in drm_dp_mst_atomic_check_payload_alloc_limits()
5349 mst_state->total_avail_slots - avail_slots); in drm_dp_mst_atomic_check_payload_alloc_limits()
5357 * @mgr: MST topology manager
5359 * Whenever there is a change in mst topology
5362 * CRTCs in that topology
5381 list_for_each_entry(pos, &mst_state->payloads, next) { in drm_dp_mst_add_affected_dsc_crtcs()
5383 connector = pos->port->connector; in drm_dp_mst_add_affected_dsc_crtcs()
5386 return -EINVAL; in drm_dp_mst_add_affected_dsc_crtcs()
5393 crtc = conn_state->crtc; in drm_dp_mst_add_affected_dsc_crtcs()
5398 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) in drm_dp_mst_add_affected_dsc_crtcs()
5401 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); in drm_dp_mst_add_affected_dsc_crtcs()
5406 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", in drm_dp_mst_add_affected_dsc_crtcs()
5409 crtc_state->mode_changed = true; in drm_dp_mst_add_affected_dsc_crtcs()
5416 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5436 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); in drm_dp_mst_atomic_enable_dsc()
5442 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5445 return -EINVAL; in drm_dp_mst_atomic_enable_dsc()
5448 if (payload->dsc_enabled == enable) { in drm_dp_mst_atomic_enable_dsc()
5449 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5451 port, enable, payload->time_slots); in drm_dp_mst_atomic_enable_dsc()
5452 time_slots = payload->time_slots; in drm_dp_mst_atomic_enable_dsc()
5456 time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn); in drm_dp_mst_atomic_enable_dsc()
5457 drm_dbg_atomic(state->dev, in drm_dp_mst_atomic_enable_dsc()
5461 return -EINVAL; in drm_dp_mst_atomic_enable_dsc()
5464 payload->dsc_enabled = enable; in drm_dp_mst_atomic_enable_dsc()
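A hedged driver atomic-check fragment toggling DSC through the helper above; dsc_pbn is assumed to be the PBN recomputed for the compressed stream:

	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
	if (slots < 0)
		return slots;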
5471 * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
5477 * Checks the given MST manager's topology state for an atomic update to ensure
5491 * - 0 if the new state is valid
5492 * - %-ENOSPC, if the new state is invalid, because of BW limitation
5495 * - The non-root port where a BW limit check failed
5500 * - %NULL if the BW limit check failed at the root port
5504 * - %-EINVAL, if the new state is invalid, because the root port has
5516 if (!mgr->mst_state) in drm_dp_mst_atomic_check_mgr()
5519 mutex_lock(&mgr->lock); in drm_dp_mst_atomic_check_mgr()
5520 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, in drm_dp_mst_atomic_check_mgr()
5523 mutex_unlock(&mgr->lock); in drm_dp_mst_atomic_check_mgr()
5533 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5537 * Checks the given topology state for an atomic update to ensure that it's
5580 * drm_atomic_get_mst_topology_state: get MST topology state
5582 * @mgr: MST topology manager, also the private object in this case
5586 * topology object.
5590 * The MST topology state or error pointer.
5595 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); in drm_atomic_get_mst_topology_state()
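A hedged fragment acquiring the MST state inside a driver's atomic check before any payload manipulation; state and mgr come from the caller:

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);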
5600 * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
5602 * @mgr: MST topology manager, also the private object in this case
5606 * topology object.
5610 * The old MST topology state, or NULL if there's no topology state for this MST mgr
5618 drm_atomic_get_old_private_obj_state(state, &mgr->base); in drm_atomic_get_old_mst_topology_state()
5625 * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
5627 * @mgr: MST topology manager, also the private object in this case
5631 * topology object.
5635 * The new MST topology state, or NULL if there's no topology state for this MST mgr
5643 drm_atomic_get_new_private_obj_state(state, &mgr->base); in drm_atomic_get_new_mst_topology_state()
5650 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5652 * @dev: device providing this structure - for i2c addition.
5667 mutex_init(&mgr->lock); in drm_dp_mst_topology_mgr_init()
5668 mutex_init(&mgr->qlock); in drm_dp_mst_topology_mgr_init()
5669 mutex_init(&mgr->delayed_destroy_lock); in drm_dp_mst_topology_mgr_init()
5670 mutex_init(&mgr->up_req_lock); in drm_dp_mst_topology_mgr_init()
5671 mutex_init(&mgr->probe_lock); in drm_dp_mst_topology_mgr_init()
5673 mutex_init(&mgr->topology_ref_history_lock); in drm_dp_mst_topology_mgr_init()
5676 INIT_LIST_HEAD(&mgr->tx_msg_downq); in drm_dp_mst_topology_mgr_init()
5677 INIT_LIST_HEAD(&mgr->destroy_port_list); in drm_dp_mst_topology_mgr_init()
5678 INIT_LIST_HEAD(&mgr->destroy_branch_device_list); in drm_dp_mst_topology_mgr_init()
5679 INIT_LIST_HEAD(&mgr->up_req_list); in drm_dp_mst_topology_mgr_init()
5683 * requeuing will also be flushed when deinitializing the topology manager. in drm_dp_mst_topology_mgr_init()
5685 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0); in drm_dp_mst_topology_mgr_init()
5686 if (mgr->delayed_destroy_wq == NULL) in drm_dp_mst_topology_mgr_init()
5687 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5689 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); in drm_dp_mst_topology_mgr_init()
5690 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); in drm_dp_mst_topology_mgr_init()
5691 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); in drm_dp_mst_topology_mgr_init()
5692 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); in drm_dp_mst_topology_mgr_init()
5693 init_waitqueue_head(&mgr->tx_waitq); in drm_dp_mst_topology_mgr_init()
5694 mgr->dev = dev; in drm_dp_mst_topology_mgr_init()
5695 mgr->aux = aux; in drm_dp_mst_topology_mgr_init()
5696 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; in drm_dp_mst_topology_mgr_init()
5697 mgr->max_payloads = max_payloads; in drm_dp_mst_topology_mgr_init()
5698 mgr->conn_base_id = conn_base_id; in drm_dp_mst_topology_mgr_init()
5702 return -ENOMEM; in drm_dp_mst_topology_mgr_init()
5704 mst_state->total_avail_slots = 63; in drm_dp_mst_topology_mgr_init()
5705 mst_state->start_slot = 1; in drm_dp_mst_topology_mgr_init()
5707 mst_state->mgr = mgr; in drm_dp_mst_topology_mgr_init()
5708 INIT_LIST_HEAD(&mst_state->payloads); in drm_dp_mst_topology_mgr_init()
5710 drm_atomic_private_obj_init(dev, &mgr->base, in drm_dp_mst_topology_mgr_init()
5711 &mst_state->base, in drm_dp_mst_topology_mgr_init()
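/*
 * Illustrative sketch (the "my_*" names are hypothetical): a driver calls
 * this once per MST-capable output at setup time, e.g.:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_dp->mst_mgr, drm_dev,
 *					   &my_dp->aux, 16, 6,
 *					   my_connector->base.id);
 *	if (ret)
 *		return ret;
 *
 * The DPCD transaction size and payload count limits are hardware specific;
 * 16 and 6 here are only placeholders.
 */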
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	/* The following will also drain any requeued work on the WQ. */
	if (mgr->delayed_destroy_wq) {
		destroy_workqueue(mgr->delayed_destroy_wq);
		mgr->delayed_destroy_wq = NULL;
	}
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;

	mutex_destroy(&mgr->delayed_destroy_lock);
	mutex_destroy(&mgr->qlock);
	mutex_destroy(&mgr->lock);
	mutex_destroy(&mgr->up_req_lock);
	mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	/* The sideband encoding limits how many leading writes fit. */
	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	/* All but the last message must be writes of at most 255 bytes. */
	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD || msgs[i].len > 0xff)
			return false;
	}

	/* The transfer must end with a single read of at most 255 bytes. */
	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}

static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD || msgs[i].flags & I2C_M_STOP ||
		    msgs[i].len > 0xff)
			return false;
	}

	/* Every message, including the final one, must be a write. */
	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
}
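/*
 * Illustrative sketch: the shape remote_i2c_read_ok() accepts is the classic
 * EDID-style transfer - zero or more address writes followed by exactly one
 * read, each at most 255 bytes:
 *
 *	u8 offset = 0;
 *	u8 buf[128];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
 *	};
 *
 * Anything else - a read in the middle, or an oversized message - is routed
 * to the "Unsupported I2C transaction" error path in drm_dp_mst_i2c_xfer().
 */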
static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_mst_port *port,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	unsigned int i;
	int ret;

	/* Pack the leading writes plus the final read into one request. */
	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes,
		       msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	return ret;
}
static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
				struct drm_dp_mst_port *port,
				struct i2c_msg *msgs, int num)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	unsigned int i;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	/* Unlike reads, each i2c_msg becomes its own sideband request. */
	for (i = 0; i < num; i++) {
		memset(&msg, 0, sizeof(msg));
		msg.req_type = DP_REMOTE_I2C_WRITE;
		msg.u.i2c_write.port_number = port->port_num;
		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
		msg.u.i2c_write.num_bytes = msgs[i].len;
		msg.u.i2c_write.bytes = msgs[i].buf;

		memset(txmsg, 0, sizeof(*txmsg));
		txmsg->dst = mstb;

		drm_dp_encode_sideband_req(&msg, txmsg);
		drm_dp_queue_down_tx(mgr, txmsg);

		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (ret > 0) {
			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
				ret = -EREMOTEIO;
				goto out;
			}
		} else {
			goto out;
		}
	}
	ret = num;
out:
	kfree(txmsg);
	return ret;
}
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
			       struct i2c_msg *msgs, int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port =
		container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (remote_i2c_read_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
	} else if (remote_i2c_write_ok(msgs, num)) {
		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
	} else {
		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
		ret = -EIO;
	}

	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @port: The port to add the I2C bus on
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *aux = &port->aux;
	struct device *parent_dev = port->mgr->dev->dev;

	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.owner = THIS_MODULE;

	aux->ddc.dev.parent = parent_dev;
	aux->ddc.dev.of_node = parent_dev->of_node;

	strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @port: The port to remove the I2C bus from
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
	i2c_del_adapter(&port->aux.ddc);
}
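/*
 * Illustrative sketch: once registered, the adapter behaves like any other
 * DDC bus, so a driver holding a "connector" for this port can fetch the
 * remote sink's EDID with the stock helper:
 *
 *	struct edid *edid = drm_get_edid(connector, &port->aux.ddc);
 *
 * (kfree() the result when done, as with any other EDID.)
 */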
/**
 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
 * exposing a DPCD
 * @port: The port to check
 *
 * A single physical MST hub object can be represented in the topology
 * by multiple branches, with virtual ports between those branches.
 *
 * As of DP1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DP 1.4 specification.
 *
 * May acquire mgr->lock
 *
 * Returns:
 * true if the port is a virtual DP peer device, false otherwise
 */
static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *downstream_port;

	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
		return false;

	/* Virtual DP Sink (Internal Display Panel) */
	if (port->port_num >= 8)
		return true;

	/* DP-to-HDMI Protocol Converter */
	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
	    !port->mcs &&
	    port->ldps)
		return true;

	/* DP-to-DP */
	mutex_lock(&port->mgr->lock);
	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
	    port->mstb &&
	    port->mstb->num_ports == 2) {
		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
			    !downstream_port->input) {
				mutex_unlock(&port->mgr->lock);
				return true;
			}
		}
	}
	mutex_unlock(&port->mgr->lock);

	return false;
}
/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_aux *immediate_upstream_aux;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc;
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;
	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC.
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}
	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
			port->passthrough_aux = &immediate_upstream_port->aux;
			return &port->aux;
		}

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}
	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;
	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (immediate_upstream_port)
		immediate_upstream_aux = &immediate_upstream_port->aux;
	else
		immediate_upstream_aux = port->mgr->aux;

	if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];

		if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
			return NULL;

		if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
		    (dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
		    (dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) !=
		     DP_DWN_STRM_PORT_TYPE_ANALOG)
			return immediate_upstream_aux;
	}
	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
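/*
 * Illustrative sketch: since this lookup can cost several aux reads, a
 * driver would typically resolve it once, cache it, and then use the same
 * aux for both capability parsing and enabling DSC:
 *
 *	u8 dsc_caps[DP_DSC_RECEIVER_CAP_SIZE];
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux &&
 *	    drm_dp_dpcd_read(dsc_aux, DP_DSC_SUPPORT, dsc_caps,
 *			     sizeof(dsc_caps)) == sizeof(dsc_caps))
 *		... DSC may be considered for this port ...
 */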