1 /*
2 * Copyright © 2014 Red Hat
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23 #include <linux/bitfield.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/export.h>
27 #include <linux/i2c.h>
28 #include <linux/init.h>
29 #include <linux/kernel.h>
30 #include <linux/random.h>
31 #include <linux/sched.h>
32 #include <linux/seq_file.h>
33
34 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
35 #include <linux/stacktrace.h>
36 #include <linux/sort.h>
37 #include <linux/timekeeping.h>
38 #include <linux/math64.h>
39 #endif
40
41 #include <drm/display/drm_dp_mst_helper.h>
42 #include <drm/drm_atomic.h>
43 #include <drm/drm_atomic_helper.h>
44 #include <drm/drm_drv.h>
45 #include <drm/drm_edid.h>
46 #include <drm/drm_fixed.h>
47 #include <drm/drm_print.h>
48 #include <drm/drm_probe_helper.h>
49
50 #include "drm_dp_helper_internal.h"
51 #include "drm_dp_mst_topology_internal.h"
52
53 /**
54 * DOC: dp mst helper
55 *
56 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
57 * protocol. The helpers contain a topology manager and bandwidth manager.
58 * The helpers encapsulate the sending and receiving of sideband messages.
59 */
60 struct drm_dp_pending_up_req {
61 struct drm_dp_sideband_msg_hdr hdr;
62 struct drm_dp_sideband_msg_req_body msg;
63 struct list_head next;
64 };
65
66 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
67 char *buf);
68
69 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
70
71 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
72 struct drm_dp_mst_port *port,
73 int offset, int size, u8 *bytes);
74 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
75 struct drm_dp_mst_port *port,
76 int offset, int size, u8 *bytes);
77
78 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
79 struct drm_dp_mst_branch *mstb);
80
81 static void
82 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
83 struct drm_dp_mst_branch *mstb);
84
85 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
86 struct drm_dp_mst_branch *mstb,
87 struct drm_dp_mst_port *port);
88 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
89 guid_t *guid);
90
91 static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
92 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
93 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
94
95 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
96 struct drm_dp_mst_branch *branch);
97
98 #define DBG_PREFIX "[dp_mst]"
99
100 #define DP_STR(x) [DP_ ## x] = #x
101
102 static const char *drm_dp_mst_req_type_str(u8 req_type)
103 {
104 static const char * const req_type_str[] = {
105 DP_STR(GET_MSG_TRANSACTION_VERSION),
106 DP_STR(LINK_ADDRESS),
107 DP_STR(CONNECTION_STATUS_NOTIFY),
108 DP_STR(ENUM_PATH_RESOURCES),
109 DP_STR(ALLOCATE_PAYLOAD),
110 DP_STR(QUERY_PAYLOAD),
111 DP_STR(RESOURCE_STATUS_NOTIFY),
112 DP_STR(CLEAR_PAYLOAD_ID_TABLE),
113 DP_STR(REMOTE_DPCD_READ),
114 DP_STR(REMOTE_DPCD_WRITE),
115 DP_STR(REMOTE_I2C_READ),
116 DP_STR(REMOTE_I2C_WRITE),
117 DP_STR(POWER_UP_PHY),
118 DP_STR(POWER_DOWN_PHY),
119 DP_STR(SINK_EVENT_NOTIFY),
120 DP_STR(QUERY_STREAM_ENC_STATUS),
121 };
122
123 if (req_type >= ARRAY_SIZE(req_type_str) ||
124 !req_type_str[req_type])
125 return "unknown";
126
127 return req_type_str[req_type];
128 }
129
130 #undef DP_STR
131 #define DP_STR(x) [DP_NAK_ ## x] = #x
132
133 static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
134 {
135 static const char * const nak_reason_str[] = {
136 DP_STR(WRITE_FAILURE),
137 DP_STR(INVALID_READ),
138 DP_STR(CRC_FAILURE),
139 DP_STR(BAD_PARAM),
140 DP_STR(DEFER),
141 DP_STR(LINK_FAILURE),
142 DP_STR(NO_RESOURCES),
143 DP_STR(DPCD_FAIL),
144 DP_STR(I2C_NAK),
145 DP_STR(ALLOCATE_FAIL),
146 };
147
148 if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
149 !nak_reason_str[nak_reason])
150 return "unknown";
151
152 return nak_reason_str[nak_reason];
153 }
154
155 #undef DP_STR
156 #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
157
158 static const char *drm_dp_mst_sideband_tx_state_str(int state)
159 {
160 static const char * const sideband_reason_str[] = {
161 DP_STR(QUEUED),
162 DP_STR(START_SEND),
163 DP_STR(SENT),
164 DP_STR(RX),
165 DP_STR(TIMEOUT),
166 };
167
168 if (state >= ARRAY_SIZE(sideband_reason_str) ||
169 !sideband_reason_str[state])
170 return "unknown";
171
172 return sideband_reason_str[state];
173 }
174
175 static inline u8
176 drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad)
177 {
178 int idx = (lct / 2) - 1;
179 int shift = (lct % 2) ? 0 : 4;
180 u8 ufp_num;
181
182 /* mst_primary, its RAD is unset */
183 if (lct == 1)
184 return 0;
185
186 ufp_num = (rad[idx] >> shift) & 0xf;
187
188 return ufp_num;
189 }
190
191 static int
192 drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
193 {
194 int i;
195 u8 unpacked_rad[16] = {};
196
197 for (i = 0; i < lct; i++)
198 unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
199
200 /* TODO: Eventually add something to printk so we can format the rad
201 * like this: 1.2.3
202 */
203 return snprintf(out, len, "%*phC", lct, unpacked_rad);
204 }
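
/*
 * Worked example (illustrative): a device at link count total (LCT) 3 whose
 * RAD byte 0 is 0x21 yields port number 2 at LCT 2 (high nibble) and port
 * number 1 at LCT 3 (low nibble), so drm_dp_mst_rad_to_str() would render
 * the unpacked path as "00:02:01".
 */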
205
206 /* sideband msg handling */
207 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
208 {
209 u8 bitmask = 0x80;
210 u8 bitshift = 7;
211 u8 array_index = 0;
212 int number_of_bits = num_nibbles * 4;
213 u8 remainder = 0;
214
215 while (number_of_bits != 0) {
216 number_of_bits--;
217 remainder <<= 1;
218 remainder |= (data[array_index] & bitmask) >> bitshift;
219 bitmask >>= 1;
220 bitshift--;
221 if (bitmask == 0) {
222 bitmask = 0x80;
223 bitshift = 7;
224 array_index++;
225 }
226 if ((remainder & 0x10) == 0x10)
227 remainder ^= 0x13;
228 }
229
230 number_of_bits = 4;
231 while (number_of_bits != 0) {
232 number_of_bits--;
233 remainder <<= 1;
234 if ((remainder & 0x10) != 0)
235 remainder ^= 0x13;
236 }
237
238 return remainder;
239 }
240
241 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
242 {
243 u8 bitmask = 0x80;
244 u8 bitshift = 7;
245 u8 array_index = 0;
246 int number_of_bits = number_of_bytes * 8;
247 u16 remainder = 0;
248
249 while (number_of_bits != 0) {
250 number_of_bits--;
251 remainder <<= 1;
252 remainder |= (data[array_index] & bitmask) >> bitshift;
253 bitmask >>= 1;
254 bitshift--;
255 if (bitmask == 0) {
256 bitmask = 0x80;
257 bitshift = 7;
258 array_index++;
259 }
260 if ((remainder & 0x100) == 0x100)
261 remainder ^= 0xd5;
262 }
263
264 number_of_bits = 8;
265 while (number_of_bits != 0) {
266 number_of_bits--;
267 remainder <<= 1;
268 if ((remainder & 0x100) != 0)
269 remainder ^= 0xd5;
270 }
271
272 return remainder & 0xff;
273 }
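
/*
 * Both routines above are plain bit-serial CRC implementations: the header
 * CRC reduces the message header nibbles modulo the 4-bit generator 0x13,
 * while the data CRC reduces the body bytes modulo the 8-bit generator 0xd5,
 * matching the sideband message CRCs defined for DP MST.
 */
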
274 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
275 {
276 u8 size = 3;
277
278 size += (hdr->lct / 2);
279 return size;
280 }
281
282 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
283 u8 *buf, int *len)
284 {
285 int idx = 0;
286 int i;
287 u8 crc4;
288
289 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
290 for (i = 0; i < (hdr->lct / 2); i++)
291 buf[idx++] = hdr->rad[i];
292 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
293 (hdr->msg_len & 0x3f);
294 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
295
296 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
297 buf[idx - 1] |= (crc4 & 0xf);
298
299 *len = idx;
300 }
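
/*
 * Resulting header layout (sketch, non-broadcast request with lct == 2):
 *
 *   byte 0: LCT[7:4] | LCR[3:0]
 *   byte 1: RAD[0]                              (lct / 2 RAD bytes in total)
 *   byte 2: broadcast[7] | path_msg[6] | msg_len[5:0]
 *   byte 3: SOMT[7] | EOMT[6] | seqno[4] | header CRC-4[3:0]
 *
 * The CRC nibble covers every preceding header nibble, which is why
 * drm_dp_msg_header_crc4() is invoked with (idx * 2) - 1 nibbles above.
 */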
301
302 static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
303 struct drm_dp_sideband_msg_hdr *hdr,
304 u8 *buf, int buflen, u8 *hdrlen)
305 {
306 u8 crc4;
307 u8 len;
308 int i;
309 u8 idx;
310
311 if (buf[0] == 0)
312 return false;
313 len = 3;
314 len += ((buf[0] & 0xf0) >> 4) / 2;
315 if (len > buflen)
316 return false;
317 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
318
319 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
320 drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
321 return false;
322 }
323
324 hdr->lct = (buf[0] & 0xf0) >> 4;
325 hdr->lcr = (buf[0] & 0xf);
326 idx = 1;
327 for (i = 0; i < (hdr->lct / 2); i++)
328 hdr->rad[i] = buf[idx++];
329 hdr->broadcast = (buf[idx] >> 7) & 0x1;
330 hdr->path_msg = (buf[idx] >> 6) & 0x1;
331 hdr->msg_len = buf[idx] & 0x3f;
332 if (hdr->msg_len < 1) /* min space for body CRC */
333 return false;
334
335 idx++;
336 hdr->somt = (buf[idx] >> 7) & 0x1;
337 hdr->eomt = (buf[idx] >> 6) & 0x1;
338 hdr->seqno = (buf[idx] >> 4) & 0x1;
339 idx++;
340 *hdrlen = idx;
341 return true;
342 }
343
344 void
345 drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
346 struct drm_dp_sideband_msg_tx *raw)
347 {
348 int idx = 0;
349 int i;
350 u8 *buf = raw->msg;
351
352 buf[idx++] = req->req_type & 0x7f;
353
354 switch (req->req_type) {
355 case DP_ENUM_PATH_RESOURCES:
356 case DP_POWER_DOWN_PHY:
357 case DP_POWER_UP_PHY:
358 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
359 idx++;
360 break;
361 case DP_ALLOCATE_PAYLOAD:
362 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
363 (req->u.allocate_payload.number_sdp_streams & 0xf);
364 idx++;
365 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
366 idx++;
367 buf[idx] = (req->u.allocate_payload.pbn >> 8);
368 idx++;
369 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
370 idx++;
371 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
372 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
373 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
374 idx++;
375 }
376 if (req->u.allocate_payload.number_sdp_streams & 1) {
377 i = req->u.allocate_payload.number_sdp_streams - 1;
378 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
379 idx++;
380 }
381 break;
382 case DP_QUERY_PAYLOAD:
383 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
384 idx++;
385 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
386 idx++;
387 break;
388 case DP_REMOTE_DPCD_READ:
389 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
390 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
391 idx++;
392 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
393 idx++;
394 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
395 idx++;
396 buf[idx] = (req->u.dpcd_read.num_bytes);
397 idx++;
398 break;
399
400 case DP_REMOTE_DPCD_WRITE:
401 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
402 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
403 idx++;
404 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
405 idx++;
406 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
407 idx++;
408 buf[idx] = (req->u.dpcd_write.num_bytes);
409 idx++;
410 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
411 idx += req->u.dpcd_write.num_bytes;
412 break;
413 case DP_REMOTE_I2C_READ:
414 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
415 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
416 idx++;
417 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
418 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
419 idx++;
420 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
421 idx++;
422 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
423 idx += req->u.i2c_read.transactions[i].num_bytes;
424
425 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
426 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
427 idx++;
428 }
429 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
430 idx++;
431 buf[idx] = (req->u.i2c_read.num_bytes_read);
432 idx++;
433 break;
434
435 case DP_REMOTE_I2C_WRITE:
436 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
437 idx++;
438 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
439 idx++;
440 buf[idx] = (req->u.i2c_write.num_bytes);
441 idx++;
442 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
443 idx += req->u.i2c_write.num_bytes;
444 break;
445 case DP_QUERY_STREAM_ENC_STATUS: {
446 const struct drm_dp_query_stream_enc_status *msg;
447
448 msg = &req->u.enc_status;
449 buf[idx] = msg->stream_id;
450 idx++;
451 memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
452 idx += sizeof(msg->client_id);
453 buf[idx] = 0;
454 buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
455 buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
456 buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
457 buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
458 idx++;
459 }
460 break;
461 }
462 raw->cur_len = idx;
463 }
464 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
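
/*
 * Usage sketch (illustrative only; the build_*() helpers further down do this
 * for the core): fill in a request body and let the encoder serialise it into
 * txmsg->msg / txmsg->cur_len, e.g. for a remote DPCD read:
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_REMOTE_DPCD_READ,
 *	};
 *
 *	req.u.dpcd_read.port_number = port_num;
 *	req.u.dpcd_read.dpcd_address = offset;
 *	req.u.dpcd_read.num_bytes = num_bytes;
 *	drm_dp_encode_sideband_req(&req, txmsg);
 */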
465
466 /* Decode a sideband request we've encoded, mainly used for debugging */
467 int
468 drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
469 struct drm_dp_sideband_msg_req_body *req)
470 {
471 const u8 *buf = raw->msg;
472 int i, idx = 0;
473
474 req->req_type = buf[idx++] & 0x7f;
475 switch (req->req_type) {
476 case DP_ENUM_PATH_RESOURCES:
477 case DP_POWER_DOWN_PHY:
478 case DP_POWER_UP_PHY:
479 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
480 break;
481 case DP_ALLOCATE_PAYLOAD:
482 {
483 struct drm_dp_allocate_payload *a =
484 &req->u.allocate_payload;
485
486 a->number_sdp_streams = buf[idx] & 0xf;
487 a->port_number = (buf[idx] >> 4) & 0xf;
488
489 WARN_ON(buf[++idx] & 0x80);
490 a->vcpi = buf[idx] & 0x7f;
491
492 a->pbn = buf[++idx] << 8;
493 a->pbn |= buf[++idx];
494
495 idx++;
496 for (i = 0; i < a->number_sdp_streams; i++) {
497 a->sdp_stream_sink[i] =
498 (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
499 }
500 }
501 break;
502 case DP_QUERY_PAYLOAD:
503 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
504 WARN_ON(buf[++idx] & 0x80);
505 req->u.query_payload.vcpi = buf[idx] & 0x7f;
506 break;
507 case DP_REMOTE_DPCD_READ:
508 {
509 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
510
511 r->port_number = (buf[idx] >> 4) & 0xf;
512
513 r->dpcd_address = (buf[idx] << 16) & 0xf0000;
514 r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
515 r->dpcd_address |= buf[++idx] & 0xff;
516
517 r->num_bytes = buf[++idx];
518 }
519 break;
520 case DP_REMOTE_DPCD_WRITE:
521 {
522 struct drm_dp_remote_dpcd_write *w =
523 &req->u.dpcd_write;
524
525 w->port_number = (buf[idx] >> 4) & 0xf;
526
527 w->dpcd_address = (buf[idx] << 16) & 0xf0000;
528 w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
529 w->dpcd_address |= buf[++idx] & 0xff;
530
531 w->num_bytes = buf[++idx];
532
533 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
534 GFP_KERNEL);
535 if (!w->bytes)
536 return -ENOMEM;
537 }
538 break;
539 case DP_REMOTE_I2C_READ:
540 {
541 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
542 struct drm_dp_remote_i2c_read_tx *tx;
543 bool failed = false;
544
545 r->num_transactions = buf[idx] & 0x3;
546 r->port_number = (buf[idx] >> 4) & 0xf;
547 for (i = 0; i < r->num_transactions; i++) {
548 tx = &r->transactions[i];
549
550 tx->i2c_dev_id = buf[++idx] & 0x7f;
551 tx->num_bytes = buf[++idx];
552 tx->bytes = kmemdup(&buf[++idx],
553 tx->num_bytes,
554 GFP_KERNEL);
555 if (!tx->bytes) {
556 failed = true;
557 break;
558 }
559 idx += tx->num_bytes;
560 tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
561 tx->i2c_transaction_delay = buf[idx] & 0xf;
562 }
563
564 if (failed) {
565 for (i = 0; i < r->num_transactions; i++) {
566 tx = &r->transactions[i];
567 kfree(tx->bytes);
568 }
569 return -ENOMEM;
570 }
571
572 r->read_i2c_device_id = buf[++idx] & 0x7f;
573 r->num_bytes_read = buf[++idx];
574 }
575 break;
576 case DP_REMOTE_I2C_WRITE:
577 {
578 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
579
580 w->port_number = (buf[idx] >> 4) & 0xf;
581 w->write_i2c_device_id = buf[++idx] & 0x7f;
582 w->num_bytes = buf[++idx];
583 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
584 GFP_KERNEL);
585 if (!w->bytes)
586 return -ENOMEM;
587 }
588 break;
589 case DP_QUERY_STREAM_ENC_STATUS:
590 req->u.enc_status.stream_id = buf[idx++];
591 for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
592 req->u.enc_status.client_id[i] = buf[idx++];
593
594 req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
595 buf[idx]);
596 req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
597 buf[idx]);
598 req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
599 buf[idx]);
600 req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
601 buf[idx]);
602 break;
603 }
604
605 return 0;
606 }
607 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
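
/*
 * Note for callers of drm_dp_decode_sideband_req(): DP_REMOTE_DPCD_WRITE,
 * DP_REMOTE_I2C_READ and DP_REMOTE_I2C_WRITE requests come back with
 * kmemdup()'d byte buffers, which the caller must kfree() when done - see
 * drm_dp_mst_dump_sideband_msg_tx() below for an example.
 */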
608
609 void
610 drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
611 int indent, struct drm_printer *printer)
612 {
613 int i;
614
615 #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
616 if (req->req_type == DP_LINK_ADDRESS) {
617 /* No contents to print */
618 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
619 return;
620 }
621
622 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
623 indent++;
624
625 switch (req->req_type) {
626 case DP_ENUM_PATH_RESOURCES:
627 case DP_POWER_DOWN_PHY:
628 case DP_POWER_UP_PHY:
629 P("port=%d\n", req->u.port_num.port_number);
630 break;
631 case DP_ALLOCATE_PAYLOAD:
632 P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
633 req->u.allocate_payload.port_number,
634 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
635 req->u.allocate_payload.number_sdp_streams,
636 req->u.allocate_payload.number_sdp_streams,
637 req->u.allocate_payload.sdp_stream_sink);
638 break;
639 case DP_QUERY_PAYLOAD:
640 P("port=%d vcpi=%d\n",
641 req->u.query_payload.port_number,
642 req->u.query_payload.vcpi);
643 break;
644 case DP_REMOTE_DPCD_READ:
645 P("port=%d dpcd_addr=%05x len=%d\n",
646 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
647 req->u.dpcd_read.num_bytes);
648 break;
649 case DP_REMOTE_DPCD_WRITE:
650 P("port=%d addr=%05x len=%d: %*ph\n",
651 req->u.dpcd_write.port_number,
652 req->u.dpcd_write.dpcd_address,
653 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
654 req->u.dpcd_write.bytes);
655 break;
656 case DP_REMOTE_I2C_READ:
657 P("port=%d num_tx=%d id=%d size=%d:\n",
658 req->u.i2c_read.port_number,
659 req->u.i2c_read.num_transactions,
660 req->u.i2c_read.read_i2c_device_id,
661 req->u.i2c_read.num_bytes_read);
662
663 indent++;
664 for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
665 const struct drm_dp_remote_i2c_read_tx *rtx =
666 &req->u.i2c_read.transactions[i];
667
668 P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
669 i, rtx->i2c_dev_id, rtx->num_bytes,
670 rtx->no_stop_bit, rtx->i2c_transaction_delay,
671 rtx->num_bytes, rtx->bytes);
672 }
673 break;
674 case DP_REMOTE_I2C_WRITE:
675 P("port=%d id=%d size=%d: %*ph\n",
676 req->u.i2c_write.port_number,
677 req->u.i2c_write.write_i2c_device_id,
678 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
679 req->u.i2c_write.bytes);
680 break;
681 case DP_QUERY_STREAM_ENC_STATUS:
682 P("stream_id=%u client_id=%*ph stream_event=%x "
683 "valid_event=%d stream_behavior=%x valid_behavior=%d",
684 req->u.enc_status.stream_id,
685 (int)ARRAY_SIZE(req->u.enc_status.client_id),
686 req->u.enc_status.client_id, req->u.enc_status.stream_event,
687 req->u.enc_status.valid_stream_event,
688 req->u.enc_status.stream_behavior,
689 req->u.enc_status.valid_stream_behavior);
690 break;
691 default:
692 P("???\n");
693 break;
694 }
695 #undef P
696 }
697 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
698
699 static inline void
700 drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
701 const struct drm_dp_sideband_msg_tx *txmsg)
702 {
703 struct drm_dp_sideband_msg_req_body req;
704 char buf[64];
705 int ret;
706 int i;
707
708 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
709 sizeof(buf));
710 drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
711 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
712 drm_dp_mst_sideband_tx_state_str(txmsg->state),
713 txmsg->path_msg, buf);
714
715 ret = drm_dp_decode_sideband_req(txmsg, &req);
716 if (ret) {
717 drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
718 return;
719 }
720 drm_dp_dump_sideband_msg_req_body(&req, 1, p);
721
722 switch (req.req_type) {
723 case DP_REMOTE_DPCD_WRITE:
724 kfree(req.u.dpcd_write.bytes);
725 break;
726 case DP_REMOTE_I2C_READ:
727 for (i = 0; i < req.u.i2c_read.num_transactions; i++)
728 kfree(req.u.i2c_read.transactions[i].bytes);
729 break;
730 case DP_REMOTE_I2C_WRITE:
731 kfree(req.u.i2c_write.bytes);
732 break;
733 }
734 }
735
736 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
737 {
738 u8 crc4;
739
740 crc4 = drm_dp_msg_data_crc4(msg, len);
741 msg[len] = crc4;
742 }
743
744 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
745 struct drm_dp_sideband_msg_tx *raw)
746 {
747 int idx = 0;
748 u8 *buf = raw->msg;
749
750 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
751
752 raw->cur_len = idx;
753 }
754
755 static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
756 struct drm_dp_sideband_msg_hdr *hdr,
757 u8 hdrlen)
758 {
759 /*
760 * ignore out-of-order messages or messages that are part of a
761 * failed transaction
762 */
763 if (!hdr->somt && !msg->have_somt)
764 return false;
765
766 /* get length contained in this portion */
767 msg->curchunk_idx = 0;
768 msg->curchunk_len = hdr->msg_len;
769 msg->curchunk_hdrlen = hdrlen;
770
771 /* we have already gotten an somt - don't bother parsing */
772 if (hdr->somt && msg->have_somt)
773 return false;
774
775 if (hdr->somt) {
776 memcpy(&msg->initial_hdr, hdr,
777 sizeof(struct drm_dp_sideband_msg_hdr));
778 msg->have_somt = true;
779 }
780 if (hdr->eomt)
781 msg->have_eomt = true;
782
783 return true;
784 }
785
786 /* this adds a chunk of msg to the builder to get the final msg */
787 static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
788 u8 *replybuf, u8 replybuflen)
789 {
790 u8 crc4;
791
792 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
793 msg->curchunk_idx += replybuflen;
794
795 if (msg->curchunk_idx >= msg->curchunk_len) {
796 /* do CRC */
797 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
798 if (crc4 != msg->chunk[msg->curchunk_len - 1])
799 print_hex_dump(KERN_DEBUG, "wrong crc",
800 DUMP_PREFIX_NONE, 16, 1,
801 msg->chunk, msg->curchunk_len, false);
802 /* copy chunk into bigger msg */
803 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
804 msg->curlen += msg->curchunk_len - 1;
805 }
806 return true;
807 }
808
809 static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
810 struct drm_dp_sideband_msg_rx *raw,
811 struct drm_dp_sideband_msg_reply_body *repmsg)
812 {
813 int idx = 1;
814 int i;
815
816 import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]);
817 idx += 16;
818 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
819 idx++;
820 if (idx > raw->curlen)
821 goto fail_len;
822 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
823 if (raw->msg[idx] & 0x80)
824 repmsg->u.link_addr.ports[i].input_port = 1;
825
826 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
827 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
828
829 idx++;
830 if (idx > raw->curlen)
831 goto fail_len;
832 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
833 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
834 if (repmsg->u.link_addr.ports[i].input_port == 0)
835 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
836 idx++;
837 if (idx > raw->curlen)
838 goto fail_len;
839 if (repmsg->u.link_addr.ports[i].input_port == 0) {
840 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
841 idx++;
842 if (idx > raw->curlen)
843 goto fail_len;
844 import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]);
845 idx += 16;
846 if (idx > raw->curlen)
847 goto fail_len;
848 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
849 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
850 idx++;
851
852 }
853 if (idx > raw->curlen)
854 goto fail_len;
855 }
856
857 return true;
858 fail_len:
859 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
860 return false;
861 }
862
863 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
864 struct drm_dp_sideband_msg_reply_body *repmsg)
865 {
866 int idx = 1;
867
868 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
869 idx++;
870 if (idx > raw->curlen)
871 goto fail_len;
872 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
873 idx++;
874 if (idx > raw->curlen)
875 goto fail_len;
876
877 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
878 return true;
879 fail_len:
880 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
881 return false;
882 }
883
884 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
885 struct drm_dp_sideband_msg_reply_body *repmsg)
886 {
887 int idx = 1;
888
889 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
890 idx++;
891 if (idx > raw->curlen)
892 goto fail_len;
893 return true;
894 fail_len:
895 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
896 return false;
897 }
898
899 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
900 struct drm_dp_sideband_msg_reply_body *repmsg)
901 {
902 int idx = 1;
903
904 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
905 idx++;
906 if (idx > raw->curlen)
907 goto fail_len;
908 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
909 idx++;
910 /* TODO check */
911 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
912 return true;
913 fail_len:
914 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
915 return false;
916 }
917
918 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
919 struct drm_dp_sideband_msg_reply_body *repmsg)
920 {
921 int idx = 1;
922
923 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
924 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
925 idx++;
926 if (idx > raw->curlen)
927 goto fail_len;
928 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
929 idx += 2;
930 if (idx > raw->curlen)
931 goto fail_len;
932 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
933 idx += 2;
934 if (idx > raw->curlen)
935 goto fail_len;
936 return true;
937 fail_len:
938 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
939 return false;
940 }
941
942 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
943 struct drm_dp_sideband_msg_reply_body *repmsg)
944 {
945 int idx = 1;
946
947 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
948 idx++;
949 if (idx > raw->curlen)
950 goto fail_len;
951 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
952 idx++;
953 if (idx > raw->curlen)
954 goto fail_len;
955 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
956 idx += 2;
957 if (idx > raw->curlen)
958 goto fail_len;
959 return true;
960 fail_len:
961 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
962 return false;
963 }
964
965 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
966 struct drm_dp_sideband_msg_reply_body *repmsg)
967 {
968 int idx = 1;
969
970 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
971 idx++;
972 if (idx > raw->curlen)
973 goto fail_len;
974 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
975 idx += 2;
976 if (idx > raw->curlen)
977 goto fail_len;
978 return true;
979 fail_len:
980 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
981 return false;
982 }
983
984 static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
985 struct drm_dp_sideband_msg_reply_body *repmsg)
986 {
987 int idx = 1;
988
989 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
990 idx++;
991 if (idx > raw->curlen) {
992 DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
993 idx, raw->curlen);
994 return false;
995 }
996 return true;
997 }
998
999 static bool
1000 drm_dp_sideband_parse_query_stream_enc_status(
1001 struct drm_dp_sideband_msg_rx *raw,
1002 struct drm_dp_sideband_msg_reply_body *repmsg)
1003 {
1004 struct drm_dp_query_stream_enc_status_ack_reply *reply;
1005
1006 reply = &repmsg->u.enc_status;
1007
1008 reply->stream_id = raw->msg[3];
1009
1010 reply->reply_signed = raw->msg[2] & BIT(0);
1011
1012 /*
1013 * NOTE: It's my impression from reading the spec that the below parsing
1014 * is correct. However I noticed while testing with an HDCP 1.4 display
1015 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
1016 * would expect both bits to be set. So keep the parsing following the
1017 * spec, but beware reality might not match the spec (at least for some
1018 * configurations).
1019 */
1020 reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
1021 reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
1022
1023 reply->query_capable_device_present = raw->msg[2] & BIT(5);
1024 reply->legacy_device_present = raw->msg[2] & BIT(6);
1025 reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
1026
1027 reply->auth_completed = !!(raw->msg[1] & BIT(3));
1028 reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
1029 reply->repeater_present = !!(raw->msg[1] & BIT(5));
1030 reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
1031
1032 return true;
1033 }
1034
1035 static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
1036 struct drm_dp_sideband_msg_rx *raw,
1037 struct drm_dp_sideband_msg_reply_body *msg)
1038 {
1039 memset(msg, 0, sizeof(*msg));
1040 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
1041 msg->req_type = (raw->msg[0] & 0x7f);
1042
1043 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
1044 import_guid(&msg->u.nak.guid, &raw->msg[1]);
1045 msg->u.nak.reason = raw->msg[17];
1046 msg->u.nak.nak_data = raw->msg[18];
1047 return false;
1048 }
1049
1050 switch (msg->req_type) {
1051 case DP_LINK_ADDRESS:
1052 return drm_dp_sideband_parse_link_address(mgr, raw, msg);
1053 case DP_QUERY_PAYLOAD:
1054 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
1055 case DP_REMOTE_DPCD_READ:
1056 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
1057 case DP_REMOTE_DPCD_WRITE:
1058 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
1059 case DP_REMOTE_I2C_READ:
1060 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
1061 case DP_REMOTE_I2C_WRITE:
1062 return true; /* since there's nothing to parse */
1063 case DP_ENUM_PATH_RESOURCES:
1064 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
1065 case DP_ALLOCATE_PAYLOAD:
1066 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
1067 case DP_POWER_DOWN_PHY:
1068 case DP_POWER_UP_PHY:
1069 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
1070 case DP_CLEAR_PAYLOAD_ID_TABLE:
1071 return true; /* since there's nothing to parse */
1072 case DP_QUERY_STREAM_ENC_STATUS:
1073 return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
1074 default:
1075 drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
1076 msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
1077 return false;
1078 }
1079 }
1080
1081 static bool
1082 drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
1083 struct drm_dp_sideband_msg_rx *raw,
1084 struct drm_dp_sideband_msg_req_body *msg)
1085 {
1086 int idx = 1;
1087
1088 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1089 idx++;
1090 if (idx > raw->curlen)
1091 goto fail_len;
1092
1093 import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]);
1094 idx += 16;
1095 if (idx > raw->curlen)
1096 goto fail_len;
1097
1098 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
1099 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
1100 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
1101 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
1102 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
1103 idx++;
1104 return true;
1105 fail_len:
1106 drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
1107 idx, raw->curlen);
1108 return false;
1109 }
1110
1111 static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
1112 struct drm_dp_sideband_msg_rx *raw,
1113 struct drm_dp_sideband_msg_req_body *msg)
1114 {
1115 int idx = 1;
1116
1117 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1118 idx++;
1119 if (idx > raw->curlen)
1120 goto fail_len;
1121
1122 import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]);
1123 idx += 16;
1124 if (idx > raw->curlen)
1125 goto fail_len;
1126
1127 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
1128 idx++;
1129 return true;
1130 fail_len:
1131 drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
1132 return false;
1133 }
1134
1135 static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
1136 struct drm_dp_sideband_msg_rx *raw,
1137 struct drm_dp_sideband_msg_req_body *msg)
1138 {
1139 memset(msg, 0, sizeof(*msg));
1140 msg->req_type = (raw->msg[0] & 0x7f);
1141
1142 switch (msg->req_type) {
1143 case DP_CONNECTION_STATUS_NOTIFY:
1144 return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
1145 case DP_RESOURCE_STATUS_NOTIFY:
1146 return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
1147 default:
1148 drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
1149 msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
1150 return false;
1151 }
1152 }
1153
1154 static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
1155 u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1156 {
1157 struct drm_dp_sideband_msg_req_body req;
1158
1159 req.req_type = DP_REMOTE_DPCD_WRITE;
1160 req.u.dpcd_write.port_number = port_num;
1161 req.u.dpcd_write.dpcd_address = offset;
1162 req.u.dpcd_write.num_bytes = num_bytes;
1163 req.u.dpcd_write.bytes = bytes;
1164 drm_dp_encode_sideband_req(&req, msg);
1165 }
1166
1167 static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
1168 {
1169 struct drm_dp_sideband_msg_req_body req;
1170
1171 req.req_type = DP_LINK_ADDRESS;
1172 drm_dp_encode_sideband_req(&req, msg);
1173 }
1174
1175 static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1176 {
1177 struct drm_dp_sideband_msg_req_body req;
1178
1179 req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1180 drm_dp_encode_sideband_req(&req, msg);
1181 msg->path_msg = true;
1182 }
1183
1184 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
1185 int port_num)
1186 {
1187 struct drm_dp_sideband_msg_req_body req;
1188
1189 req.req_type = DP_ENUM_PATH_RESOURCES;
1190 req.u.port_num.port_number = port_num;
1191 drm_dp_encode_sideband_req(&req, msg);
1192 msg->path_msg = true;
1193 return 0;
1194 }
1195
1196 static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
1197 int port_num,
1198 u8 vcpi, uint16_t pbn,
1199 u8 number_sdp_streams,
1200 u8 *sdp_stream_sink)
1201 {
1202 struct drm_dp_sideband_msg_req_body req;
1203
1204 memset(&req, 0, sizeof(req));
1205 req.req_type = DP_ALLOCATE_PAYLOAD;
1206 req.u.allocate_payload.port_number = port_num;
1207 req.u.allocate_payload.vcpi = vcpi;
1208 req.u.allocate_payload.pbn = pbn;
1209 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1210 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1211 number_sdp_streams);
1212 drm_dp_encode_sideband_req(&req, msg);
1213 msg->path_msg = true;
1214 }
1215
1216 static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1217 int port_num, bool power_up)
1218 {
1219 struct drm_dp_sideband_msg_req_body req;
1220
1221 if (power_up)
1222 req.req_type = DP_POWER_UP_PHY;
1223 else
1224 req.req_type = DP_POWER_DOWN_PHY;
1225
1226 req.u.port_num.port_number = port_num;
1227 drm_dp_encode_sideband_req(&req, msg);
1228 msg->path_msg = true;
1229 }
1230
1231 static int
1232 build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
1233 u8 *q_id)
1234 {
1235 struct drm_dp_sideband_msg_req_body req;
1236
1237 req.req_type = DP_QUERY_STREAM_ENC_STATUS;
1238 req.u.enc_status.stream_id = stream_id;
1239 memcpy(req.u.enc_status.client_id, q_id,
1240 sizeof(req.u.enc_status.client_id));
1241 req.u.enc_status.stream_event = 0;
1242 req.u.enc_status.valid_stream_event = false;
1243 req.u.enc_status.stream_behavior = 0;
1244 req.u.enc_status.valid_stream_behavior = false;
1245
1246 drm_dp_encode_sideband_req(&req, msg);
1247 return 0;
1248 }
1249
1250 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1251 struct drm_dp_sideband_msg_tx *txmsg)
1252 {
1253 unsigned int state;
1254
1255 /*
1256 * All updates to txmsg->state are protected by mgr->qlock, and the two
1257 * cases we check here are terminal states. For those the barriers
1258 * provided by the wake_up/wait_event pair are enough.
1259 */
1260 state = READ_ONCE(txmsg->state);
1261 return (state == DRM_DP_SIDEBAND_TX_RX ||
1262 state == DRM_DP_SIDEBAND_TX_TIMEOUT);
1263 }
1264
1265 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
1266 struct drm_dp_sideband_msg_tx *txmsg)
1267 {
1268 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1269 unsigned long wait_timeout = msecs_to_jiffies(4000);
1270 unsigned long wait_expires = jiffies + wait_timeout;
1271 int ret;
1272
1273 for (;;) {
1274 /*
1275 * If the driver provides a way for this, change to
1276 * poll-waiting for the MST reply interrupt if we didn't receive
1277 * it for 50 msec. This would cater for cases where the HPD
1278 * pulse signal got lost somewhere, even though the sink raised
1279 * the corresponding MST interrupt correctly. One example is the
1280 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
1281 * filters out short pulses with a duration less than ~540 usec.
1282 *
1283 * The poll period is 50 msec to avoid missing an interrupt
1284 * after the sink has cleared it (after a 110msec timeout
1285 * since it raised the interrupt).
1286 */
1287 ret = wait_event_timeout(mgr->tx_waitq,
1288 check_txmsg_state(mgr, txmsg),
1289 mgr->cbs->poll_hpd_irq ?
1290 msecs_to_jiffies(50) :
1291 wait_timeout);
1292
1293 if (ret || !mgr->cbs->poll_hpd_irq ||
1294 time_after(jiffies, wait_expires))
1295 break;
1296
1297 mgr->cbs->poll_hpd_irq(mgr);
1298 }
1299
1300 mutex_lock(&mgr->qlock);
1301 if (ret > 0) {
1302 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
1303 ret = -EIO;
1304 goto out;
1305 }
1306 } else {
1307 drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
1308 txmsg, txmsg->state, txmsg->seqno);
1309
1310 /* dump some state */
1311 ret = -EIO;
1312
1313 /* remove from q */
1314 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
1315 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
1316 txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
1317 list_del(&txmsg->next);
1318 }
1319 out:
1320 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
1321 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
1322 DBG_PREFIX);
1323
1324 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1325 }
1326 mutex_unlock(&mgr->qlock);
1327
1328 drm_dp_mst_kick_tx(mgr);
1329 return ret;
1330 }
1331
1332 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
1333 {
1334 struct drm_dp_mst_branch *mstb;
1335
1336 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
1337 if (!mstb)
1338 return NULL;
1339
1340 mstb->lct = lct;
1341 if (lct > 1)
1342 memcpy(mstb->rad, rad, lct / 2);
1343 INIT_LIST_HEAD(&mstb->ports);
1344 kref_init(&mstb->topology_kref);
1345 kref_init(&mstb->malloc_kref);
1346 return mstb;
1347 }
1348
1349 static void drm_dp_free_mst_branch_device(struct kref *kref)
1350 {
1351 struct drm_dp_mst_branch *mstb =
1352 container_of(kref, struct drm_dp_mst_branch, malloc_kref);
1353
1354 if (mstb->port_parent)
1355 drm_dp_mst_put_port_malloc(mstb->port_parent);
1356
1357 kfree(mstb);
1358 }
1359
1360 /**
1361 * DOC: Branch device and port refcounting
1362 *
1363 * Topology refcount overview
1364 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
1365 *
1366 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
1367 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
1368 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1369 *
1370 * Topology refcounts are not exposed to drivers, and are handled internally
1371 * by the DP MST helpers. The helpers use them in order to prevent the
1372 * in-memory topology state from being changed in the middle of critical
1373 * operations like changing the internal state of payload allocations. This
1374 * means each branch and port will be considered to be connected to the rest
1375 * of the topology until its topology refcount reaches zero. Additionally,
1376 * for ports this means that their associated &struct drm_connector will stay
1377 * registered with userspace until the port's refcount reaches 0.
1378 *
1379 * Malloc refcount overview
1380 * ~~~~~~~~~~~~~~~~~~~~~~~~
1381 *
1382 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
1383 * drm_dp_mst_branch allocated even after all of its topology references have
1384 * been dropped, so that the driver or MST helpers can safely access each
1385 * branch's last known state before it was disconnected from the topology.
1386 * When the malloc refcount of a port or branch reaches 0, the memory
1387 * allocation containing the &struct drm_dp_mst_branch or &struct
1388 * drm_dp_mst_port respectively will be freed.
1389 *
1390 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
1391 * to drivers. As of writing this documentation, there are no drivers that
1392 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
1393 * helpers. Exposing this API to drivers in a race-free manner would take more
1394 * tweaking of the refcounting scheme, however patches are welcome provided
1395 * there is a legitimate driver usecase for this.
1396 *
1397 * Refcount relationships in a topology
1398 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1399 *
1400 * Let's take a look at why the relationship between topology and malloc
1401 * refcounts is designed the way it is.
1402 *
1403 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1404 *
1405 * An example of topology and malloc refs in a DP MST topology with two
1406 * active payloads. Topology refcount increments are indicated by solid
1407 * lines, and malloc refcount increments are indicated by dashed lines.
1408 * Each starts from the branch which incremented the refcount, and ends at
1409 * the branch to which the refcount belongs, i.e. the arrow points the
1410 * same way as the C pointers used to reference a structure.
1411 *
1412 * As you can see in the above figure, every branch increments the topology
1413 * refcount of its children, and increments the malloc refcount of its
1414 * parent. Additionally, every payload increments the malloc refcount of its
1415 * assigned port by 1.
1416 *
1417 * So, what would happen if MSTB #3 from the above figure was unplugged from
1418 * the system, but the driver hadn't yet removed payload #2 from port #3? The
1419 * topology would start to look like the figure below.
1420 *
1421 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1422 *
1423 * Ports and branch devices which have been released from memory are
1424 * colored grey, and references which have been removed are colored red.
1425 *
1426 * Whenever a port or branch device's topology refcount reaches zero, it will
1427 * decrement the topology refcounts of all its children, the malloc refcount
1428 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
1429 * #4, this means they both have been disconnected from the topology and freed
1430 * from memory. But, because payload #2 is still holding a reference to port
1431 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1432 * is still accessible from memory. This also means port #3 has not yet
1433 * decremented the malloc refcount of MSTB #3, so its &struct
1434 * drm_dp_mst_branch will also stay allocated in memory until port #3's
1435 * malloc refcount reaches 0.
1436 *
1437 * This relationship is necessary because in order to release payload #2, we
1438 * need to be able to figure out the last relative of port #3 that's still
1439 * connected to the topology. In this case, we would travel up the topology as
1440 * shown below.
1441 *
1442 * .. kernel-figure:: dp-mst/topology-figure-3.dot
1443 *
1444 * And finally, remove payload #2 by communicating with port #2 through
1445 * sideband transactions.
1446 */
1447
1448 /**
1449 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1450 * device
1451 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
1452 *
1453 * Increments &drm_dp_mst_branch.malloc_kref. When
1454 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1455 * will be released and @mstb may no longer be used.
1456 *
1457 * See also: drm_dp_mst_put_mstb_malloc()
1458 */
1459 static void
1460 drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1461 {
1462 kref_get(&mstb->malloc_kref);
1463 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1464 }
1465
1466 /**
1467 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1468 * device
1469 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
1470 *
1471 * Decrements &drm_dp_mst_branch.malloc_kref. When
1472 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1473 * will be released and @mstb may no longer be used.
1474 *
1475 * See also: drm_dp_mst_get_mstb_malloc()
1476 */
1477 static void
1478 drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1479 {
1480 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1481 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1482 }
1483
1484 static void drm_dp_free_mst_port(struct kref *kref)
1485 {
1486 struct drm_dp_mst_port *port =
1487 container_of(kref, struct drm_dp_mst_port, malloc_kref);
1488
1489 drm_dp_mst_put_mstb_malloc(port->parent);
1490 kfree(port);
1491 }
1492
1493 /**
1494 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1495 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1496 *
1497 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1498 * reaches 0, the memory allocation for @port will be released and @port may
1499 * no longer be used.
1500 *
1501 * Because @port could potentially be freed at any time by the DP MST helpers
1502 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1503 * function, drivers that wish to make use of &struct drm_dp_mst_port should
1504 * ensure that they grab at least one main malloc reference to their MST ports
1505 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1506 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1507 *
1508 * See also: drm_dp_mst_put_port_malloc()
1509 */
1510 void
1511 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1512 {
1513 kref_get(&port->malloc_kref);
1514 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
1515 }
1516 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
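
/*
 * Driver-side sketch (illustrative, details are driver specific): the malloc
 * reference is typically taken when the driver wraps the port in a connector
 * from its &drm_dp_mst_topology_cbs.add_connector hook, and dropped when that
 * connector is destroyed:
 *
 *	// in the driver's add_connector callback
 *	my_connector->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *
 *	// in the driver's connector destroy path
 *	drm_dp_mst_put_port_malloc(my_connector->port);
 *
 * where my_connector is a hypothetical driver structure embedding a
 * &struct drm_connector.
 */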
1517
1518 /**
1519 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1520 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1521 *
1522 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1523 * reaches 0, the memory allocation for @port will be released and @port may
1524 * no longer be used.
1525 *
1526 * See also: drm_dp_mst_get_port_malloc()
1527 */
1528 void
1529 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1530 {
1531 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1532 kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1533 }
1534 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1535
1536 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1537
1538 #define STACK_DEPTH 8
1539
1540 static noinline void
1541 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1542 struct drm_dp_mst_topology_ref_history *history,
1543 enum drm_dp_mst_topology_ref_type type)
1544 {
1545 struct drm_dp_mst_topology_ref_entry *entry = NULL;
1546 depot_stack_handle_t backtrace;
1547 ulong stack_entries[STACK_DEPTH];
1548 uint n;
1549 int i;
1550
1551 n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1552 backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1553 if (!backtrace)
1554 return;
1555
1556 /* Try to find an existing entry for this backtrace */
1557 for (i = 0; i < history->len; i++) {
1558 if (history->entries[i].backtrace == backtrace) {
1559 entry = &history->entries[i];
1560 break;
1561 }
1562 }
1563
1564 /* Otherwise add one */
1565 if (!entry) {
1566 struct drm_dp_mst_topology_ref_entry *new;
1567 int new_len = history->len + 1;
1568
1569 new = krealloc(history->entries, sizeof(*new) * new_len,
1570 GFP_KERNEL);
1571 if (!new)
1572 return;
1573
1574 entry = &new[history->len];
1575 history->len = new_len;
1576 history->entries = new;
1577
1578 entry->backtrace = backtrace;
1579 entry->type = type;
1580 entry->count = 0;
1581 }
1582 entry->count++;
1583 entry->ts_nsec = ktime_get_ns();
1584 }
1585
1586 static int
1587 topology_ref_history_cmp(const void *a, const void *b)
1588 {
1589 const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1590
1591 if (entry_a->ts_nsec > entry_b->ts_nsec)
1592 return 1;
1593 else if (entry_a->ts_nsec < entry_b->ts_nsec)
1594 return -1;
1595 else
1596 return 0;
1597 }
1598
1599 static inline const char *
1600 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1601 {
1602 if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1603 return "get";
1604 else
1605 return "put";
1606 }
1607
1608 static void
1609 __dump_topology_ref_history(struct drm_device *drm,
1610 struct drm_dp_mst_topology_ref_history *history,
1611 void *ptr, const char *type_str)
1612 {
1613 struct drm_printer p = drm_dbg_printer(drm, DRM_UT_DP, DBG_PREFIX);
1614 char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1615 int i;
1616
1617 if (!buf)
1618 return;
1619
1620 if (!history->len)
1621 goto out;
1622
1623 /* First, sort the list so that it goes from oldest to newest
1624 * reference entry
1625 */
1626 sort(history->entries, history->len, sizeof(*history->entries),
1627 topology_ref_history_cmp, NULL);
1628
1629 drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1630 type_str, ptr);
1631
1632 for (i = 0; i < history->len; i++) {
1633 const struct drm_dp_mst_topology_ref_entry *entry =
1634 &history->entries[i];
1635 u64 ts_nsec = entry->ts_nsec;
1636 u32 rem_nsec = do_div(ts_nsec, 1000000000);
1637
1638 stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
1639
1640 drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
1641 entry->count,
1642 topology_ref_type_to_str(entry->type),
1643 ts_nsec, rem_nsec / 1000, buf);
1644 }
1645
1646 /* Now free the history, since this is the only time we expose it */
1647 kfree(history->entries);
1648 out:
1649 kfree(buf);
1650 }
1651
1652 static __always_inline void
1653 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1654 {
1655 __dump_topology_ref_history(mstb->mgr->dev, &mstb->topology_ref_history,
1656 mstb, "MSTB");
1657 }
1658
1659 static __always_inline void
1660 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1661 {
1662 __dump_topology_ref_history(port->mgr->dev, &port->topology_ref_history,
1663 port, "Port");
1664 }
1665
1666 static __always_inline void
1667 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1668 enum drm_dp_mst_topology_ref_type type)
1669 {
1670 __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1671 }
1672
1673 static __always_inline void
1674 save_port_topology_ref(struct drm_dp_mst_port *port,
1675 enum drm_dp_mst_topology_ref_type type)
1676 {
1677 __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1678 }
1679
1680 static inline void
1681 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1682 {
1683 mutex_lock(&mgr->topology_ref_history_lock);
1684 }
1685
1686 static inline void
1687 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1688 {
1689 mutex_unlock(&mgr->topology_ref_history_lock);
1690 }
1691 #else
1692 static inline void
1693 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1694 static inline void
1695 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1696 static inline void
1697 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1698 static inline void
1699 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1700 #define save_mstb_topology_ref(mstb, type)
1701 #define save_port_topology_ref(port, type)
1702 #endif
1703
1704 struct drm_dp_mst_atomic_payload *
1705 drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
1706 struct drm_dp_mst_port *port)
1707 {
1708 struct drm_dp_mst_atomic_payload *payload;
1709
1710 list_for_each_entry(payload, &state->payloads, next)
1711 if (payload->port == port)
1712 return payload;
1713
1714 return NULL;
1715 }
1716 EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
1717
1718 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1719 {
1720 struct drm_dp_mst_branch *mstb =
1721 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1722 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1723
1724 drm_dp_mst_dump_mstb_topology_history(mstb);
1725
1726 INIT_LIST_HEAD(&mstb->destroy_next);
1727
1728 /*
1729 * This can get called under mgr->mutex, so we need to perform the
1730 * actual destruction of the mstb in another worker
1731 */
1732 mutex_lock(&mgr->delayed_destroy_lock);
1733 list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1734 mutex_unlock(&mgr->delayed_destroy_lock);
1735 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1736 }
1737
1738 /**
1739 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1740 * branch device unless it's zero
1741 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1742 *
1743 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1744 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1745 * reached 0). Holding a topology reference implies that a malloc reference
1746 * will be held to @mstb as long as the user holds the topology reference.
1747 *
1748 * Care should be taken to ensure that the user has at least one malloc
1749 * reference to @mstb. If you already have a topology reference to @mstb, you
1750 * should use drm_dp_mst_topology_get_mstb() instead.
1751 *
1752 * See also:
1753 * drm_dp_mst_topology_get_mstb()
1754 * drm_dp_mst_topology_put_mstb()
1755 *
1756 * Returns:
1757 * * 1: A topology reference was grabbed successfully
1758 * * 0: @mstb is no longer in the topology, no reference was grabbed
1759 */
1760 static int __must_check
1761 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1762 {
1763 int ret;
1764
1765 topology_ref_history_lock(mstb->mgr);
1766 ret = kref_get_unless_zero(&mstb->topology_kref);
1767 if (ret) {
1768 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1769 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1770 }
1771
1772 topology_ref_history_unlock(mstb->mgr);
1773
1774 return ret;
1775 }
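/*
 * Typical usage of the try_get/put pattern above (illustrative sketch only):
 * the return value must be checked, and every successful try_get has to be
 * balanced with a put once the caller is done with the branch device:
 *
 *	if (!drm_dp_mst_topology_try_get_mstb(mstb))
 *		return;		(mstb already left the topology)
 *	... use mstb ...
 *	drm_dp_mst_topology_put_mstb(mstb);
 */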
1776
1777 /**
1778 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1779 * branch device
1780 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1781 *
1782 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1783 * not it's already reached 0. This is only valid to use in scenarios where
1784 * you are already guaranteed to have at least one active topology reference
1785 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1786 *
1787 * See also:
1788 * drm_dp_mst_topology_try_get_mstb()
1789 * drm_dp_mst_topology_put_mstb()
1790 */
1791 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1792 {
1793 topology_ref_history_lock(mstb->mgr);
1794
1795 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1796 WARN_ON(kref_read(&mstb->topology_kref) == 0);
1797 kref_get(&mstb->topology_kref);
1798 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1799
1800 topology_ref_history_unlock(mstb->mgr);
1801 }
1802
1803 /**
1804 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1805 * device
1806 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1807 *
1808 * Releases a topology reference from @mstb by decrementing
1809 * &drm_dp_mst_branch.topology_kref.
1810 *
1811 * See also:
1812 * drm_dp_mst_topology_try_get_mstb()
1813 * drm_dp_mst_topology_get_mstb()
1814 */
1815 static void
1816 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1817 {
1818 topology_ref_history_lock(mstb->mgr);
1819
1820 drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
1821 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1822
1823 topology_ref_history_unlock(mstb->mgr);
1824 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1825 }
1826
1827 static void drm_dp_destroy_port(struct kref *kref)
1828 {
1829 struct drm_dp_mst_port *port =
1830 container_of(kref, struct drm_dp_mst_port, topology_kref);
1831 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1832
1833 drm_dp_mst_dump_port_topology_history(port);
1834
1835 /* There's nothing that needs locking to destroy an input port yet */
1836 if (port->input) {
1837 drm_dp_mst_put_port_malloc(port);
1838 return;
1839 }
1840
1841 drm_edid_free(port->cached_edid);
1842
1843 /*
1844 * we can't destroy the connector here, as we might be holding the
1845 * mode_config.mutex from an EDID retrieval
1846 */
1847 mutex_lock(&mgr->delayed_destroy_lock);
1848 list_add(&port->next, &mgr->destroy_port_list);
1849 mutex_unlock(&mgr->delayed_destroy_lock);
1850 queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
1851 }
1852
1853 /**
1854 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1855 * port unless it's zero
1856 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1857 *
1858 * Attempts to grab a topology reference to @port, if it hasn't yet been
1859 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1860 * 0). Holding a topology reference implies that a malloc reference will be
1861 * held to @port as long as the user holds the topology reference.
1862 *
1863 * Care should be taken to ensure that the user has at least one malloc
1864 * reference to @port. If you already have a topology reference to @port, you
1865 * should use drm_dp_mst_topology_get_port() instead.
1866 *
1867 * See also:
1868 * drm_dp_mst_topology_get_port()
1869 * drm_dp_mst_topology_put_port()
1870 *
1871 * Returns:
1872 * * 1: A topology reference was grabbed successfully
1873 * * 0: @port is no longer in the topology, no reference was grabbed
1874 */
1875 static int __must_check
1876 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1877 {
1878 int ret;
1879
1880 topology_ref_history_lock(port->mgr);
1881 ret = kref_get_unless_zero(&port->topology_kref);
1882 if (ret) {
1883 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
1884 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1885 }
1886
1887 topology_ref_history_unlock(port->mgr);
1888 return ret;
1889 }
1890
1891 /**
1892 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1893 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1894 *
1895 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1896 * not it's already reached 0. This is only valid to use in scenarios where
1897 * you are already guaranteed to have at least one active topology reference
1898 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1899 *
1900 * See also:
1901 * drm_dp_mst_topology_try_get_port()
1902 * drm_dp_mst_topology_put_port()
1903 */
1904 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1905 {
1906 topology_ref_history_lock(port->mgr);
1907
1908 WARN_ON(kref_read(&port->topology_kref) == 0);
1909 kref_get(&port->topology_kref);
1910 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
1911 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1912
1913 topology_ref_history_unlock(port->mgr);
1914 }
1915
1916 /**
1917 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1918 * @port: The &struct drm_dp_mst_port to release the topology reference from
1919 *
1920 * Releases a topology reference from @port by decrementing
1921 * &drm_dp_mst_port.topology_kref.
1922 *
1923 * See also:
1924 * drm_dp_mst_topology_try_get_port()
1925 * drm_dp_mst_topology_get_port()
1926 */
1927 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1928 {
1929 topology_ref_history_lock(port->mgr);
1930
1931 drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
1932 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1933
1934 topology_ref_history_unlock(port->mgr);
1935 kref_put(&port->topology_kref, drm_dp_destroy_port);
1936 }
1937
1938 static struct drm_dp_mst_branch *
1939 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1940 struct drm_dp_mst_branch *to_find)
1941 {
1942 struct drm_dp_mst_port *port;
1943 struct drm_dp_mst_branch *rmstb;
1944
1945 if (to_find == mstb)
1946 return mstb;
1947
1948 list_for_each_entry(port, &mstb->ports, next) {
1949 if (port->mstb) {
1950 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1951 port->mstb, to_find);
1952 if (rmstb)
1953 return rmstb;
1954 }
1955 }
1956 return NULL;
1957 }
1958
1959 static struct drm_dp_mst_branch *
1960 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1961 struct drm_dp_mst_branch *mstb)
1962 {
1963 struct drm_dp_mst_branch *rmstb = NULL;
1964
1965 mutex_lock(&mgr->lock);
1966 if (mgr->mst_primary) {
1967 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1968 mgr->mst_primary, mstb);
1969
1970 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1971 rmstb = NULL;
1972 }
1973 mutex_unlock(&mgr->lock);
1974 return rmstb;
1975 }
1976
1977 static struct drm_dp_mst_port *
1978 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1979 struct drm_dp_mst_port *to_find)
1980 {
1981 struct drm_dp_mst_port *port, *mport;
1982
1983 list_for_each_entry(port, &mstb->ports, next) {
1984 if (port == to_find)
1985 return port;
1986
1987 if (port->mstb) {
1988 mport = drm_dp_mst_topology_get_port_validated_locked(
1989 port->mstb, to_find);
1990 if (mport)
1991 return mport;
1992 }
1993 }
1994 return NULL;
1995 }
1996
1997 static struct drm_dp_mst_port *
1998 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1999 struct drm_dp_mst_port *port)
2000 {
2001 struct drm_dp_mst_port *rport = NULL;
2002
2003 mutex_lock(&mgr->lock);
2004 if (mgr->mst_primary) {
2005 rport = drm_dp_mst_topology_get_port_validated_locked(
2006 mgr->mst_primary, port);
2007
2008 if (rport && !drm_dp_mst_topology_try_get_port(rport))
2009 rport = NULL;
2010 }
2011 mutex_unlock(&mgr->lock);
2012 return rport;
2013 }
2014
2015 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
2016 {
2017 struct drm_dp_mst_port *port;
2018 int ret;
2019
2020 list_for_each_entry(port, &mstb->ports, next) {
2021 if (port->port_num == port_num) {
2022 ret = drm_dp_mst_topology_try_get_port(port);
2023 return ret ? port : NULL;
2024 }
2025 }
2026
2027 return NULL;
2028 }
2029
2030 /*
2031 * calculate a new RAD for this MST branch device
2032 * if parent has an LCT of 2 then it has 1 nibble of RAD,
2033 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
2034 */
2035 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
2036 u8 *rad)
2037 {
2038 int parent_lct = port->parent->lct;
2039 int shift = 4;
2040 int idx = (parent_lct - 1) / 2;
2041
2042 if (parent_lct > 1) {
2043 memcpy(rad, port->parent->rad, idx + 1);
2044 shift = (parent_lct % 2) ? 4 : 0;
2045 } else
2046 rad[0] = 0;
2047
2048 rad[idx] |= port->port_num << shift;
2049 return parent_lct + 1;
2050 }
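/*
 * Worked example: starting from the primary branch (LCT 1) and going out
 * through port 2, the child branch gets LCT 2 and rad[0] = 0x20 (port number
 * in the high nibble). Going out through port 3 of that branch gives LCT 3
 * and rad[0] = 0x23 - one nibble of relative address per hop after the first.
 */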
2051
2052 static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
2053 {
2054 switch (pdt) {
2055 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2056 case DP_PEER_DEVICE_SST_SINK:
2057 return true;
2058 case DP_PEER_DEVICE_MST_BRANCHING:
2059 /* For sst branch device */
2060 if (!mcs)
2061 return true;
2062
2063 return false;
2064 }
2065 return true;
2066 }
2067
2068 static int
2069 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
2070 bool new_mcs)
2071 {
2072 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2073 struct drm_dp_mst_branch *mstb;
2074 u8 rad[8], lct;
2075 int ret = 0;
2076
2077 if (port->pdt == new_pdt && port->mcs == new_mcs)
2078 return 0;
2079
2080 /* Teardown the old pdt, if there is one */
2081 if (port->pdt != DP_PEER_DEVICE_NONE) {
2082 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2083 /*
2084 * If the new PDT would also have an i2c bus,
2085 * don't bother with reregistering it
2086 */
2087 if (new_pdt != DP_PEER_DEVICE_NONE &&
2088 drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2089 port->pdt = new_pdt;
2090 port->mcs = new_mcs;
2091 return 0;
2092 }
2093
2094 /* remove i2c over sideband */
2095 drm_dp_mst_unregister_i2c_bus(port);
2096 } else {
2097 mutex_lock(&mgr->lock);
2098 drm_dp_mst_topology_put_mstb(port->mstb);
2099 port->mstb = NULL;
2100 mutex_unlock(&mgr->lock);
2101 }
2102 }
2103
2104 port->pdt = new_pdt;
2105 port->mcs = new_mcs;
2106
2107 if (port->pdt != DP_PEER_DEVICE_NONE) {
2108 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2109 /* add i2c over sideband */
2110 ret = drm_dp_mst_register_i2c_bus(port);
2111 } else {
2112 lct = drm_dp_calculate_rad(port, rad);
2113 mstb = drm_dp_add_mst_branch_device(lct, rad);
2114 if (!mstb) {
2115 ret = -ENOMEM;
2116 drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
2117 goto out;
2118 }
2119
2120 mutex_lock(&mgr->lock);
2121 port->mstb = mstb;
2122 mstb->mgr = port->mgr;
2123 mstb->port_parent = port;
2124
2125 /*
2126 * Make sure this port's memory allocation stays
2127 * around until its child MSTB releases it
2128 */
2129 drm_dp_mst_get_port_malloc(port);
2130 mutex_unlock(&mgr->lock);
2131
2132 /* And make sure we send a link address for this */
2133 ret = 1;
2134 }
2135 }
2136
2137 out:
2138 if (ret < 0)
2139 port->pdt = DP_PEER_DEVICE_NONE;
2140 return ret;
2141 }
2142
2143 /**
2144 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2145 * @aux: Fake sideband AUX CH
2146 * @offset: address of the (first) register to read
2147 * @buffer: buffer to store the register values
2148 * @size: number of bytes in @buffer
2149 *
2150 * Performs the same functionality for remote devices via
2151 * sideband messaging as drm_dp_dpcd_read() does for local
2152 * devices via actual AUX CH.
2153 *
2154 * Return: Number of bytes read, or negative error code on failure.
2155 */
2156 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2157 unsigned int offset, void *buffer, size_t size)
2158 {
2159 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2160 aux);
2161
2162 return drm_dp_send_dpcd_read(port->mgr, port,
2163 offset, size, buffer);
2164 }
2165
2166 /**
2167 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2168 * @aux: Fake sideband AUX CH
2169 * @offset: address of the (first) register to write
2170 * @buffer: buffer containing the values to write
2171 * @size: number of bytes in @buffer
2172 *
2173 * Performs the same functionality for remote devices via
2174 * sideband messaging as drm_dp_dpcd_write() does for local
2175 * devices via actual AUX CH.
2176 *
2177 * Return: number of bytes written on success, negative error code on failure.
2178 */
2179 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2180 unsigned int offset, void *buffer, size_t size)
2181 {
2182 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2183 aux);
2184
2185 return drm_dp_send_dpcd_write(port->mgr, port,
2186 offset, size, buffer);
2187 }
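/*
 * Sketch of how these are normally reached (illustrative only): drivers don't
 * call drm_dp_mst_dpcd_read()/drm_dp_mst_dpcd_write() directly, they use the
 * generic DPCD accessors on the port's fake AUX channel, which dispatch here
 * because port->aux.is_remote is set:
 *
 *	u8 rev;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &rev) == 1)
 *		... rev holds the remote sink's DPCD revision ...
 */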
2188
2189 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid)
2190 {
2191 int ret = 0;
2192
2193 guid_copy(&mstb->guid, guid);
2194
2195 if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) {
2196 struct drm_dp_aux *aux;
2197 u8 buf[UUID_SIZE];
2198
2199 export_guid(buf, &mstb->guid);
2200
2201 if (mstb->port_parent)
2202 aux = &mstb->port_parent->aux;
2203 else
2204 aux = mstb->mgr->aux;
2205
2206 ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf));
2207 }
2208
2209 return ret;
2210 }
2211
2212 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2213 int pnum,
2214 char *proppath,
2215 size_t proppath_size)
2216 {
2217 int i;
2218 char temp[8];
2219
2220 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2221 for (i = 0; i < (mstb->lct - 1); i++) {
2222 int shift = (i % 2) ? 0 : 4;
2223 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2224
2225 snprintf(temp, sizeof(temp), "-%d", port_num);
2226 strlcat(proppath, temp, proppath_size);
2227 }
2228 snprintf(temp, sizeof(temp), "-%d", pnum);
2229 strlcat(proppath, temp, proppath_size);
2230 }
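/*
 * Worked example: for a manager whose conn_base_id is 40, a branch device at
 * LCT 3 with rad[0] = 0x23 and a sink on its port 1, this builds the
 * connector path property "mst:40-2-3-1" - the base connector id followed by
 * the port number taken at each hop.
 */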
2231
2232 /**
2233 * drm_dp_mst_connector_late_register() - Late MST connector registration
2234 * @connector: The MST connector
2235 * @port: The MST port for this connector
2236 *
2237 * Helper to register the remote aux device for this MST port. Drivers should
2238 * call this from their mst connector's late_register hook to enable MST aux
2239 * devices.
2240 *
2241 * Return: 0 on success, negative error code on failure.
2242 */
2243 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2244 struct drm_dp_mst_port *port)
2245 {
2246 drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
2247 port->aux.name, connector->kdev->kobj.name);
2248
2249 port->aux.dev = connector->kdev;
2250 return drm_dp_aux_register_devnode(&port->aux);
2251 }
2252 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2253
2254 /**
2255 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2256 * @connector: The MST connector
2257 * @port: The MST port for this connector
2258 *
2259 * Helper to unregister the remote aux device for this MST port, registered by
2260 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2261 * connector's early_unregister hook.
2262 */
2263 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2264 struct drm_dp_mst_port *port)
2265 {
2266 drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
2267 port->aux.name, connector->kdev->kobj.name);
2268 drm_dp_aux_unregister_devnode(&port->aux);
2269 }
2270 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
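/*
 * Sketch of how a driver typically wires these two helpers up (illustrative
 * only, the my_connector/to_my_connector names are hypothetical):
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_connector *c = to_my_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_connector *c = to_my_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 */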
2271
2272 static void
2273 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2274 struct drm_dp_mst_port *port)
2275 {
2276 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2277 char proppath[255];
2278 int ret;
2279
2280 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2281 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2282 if (!port->connector) {
2283 ret = -ENOMEM;
2284 goto error;
2285 }
2286
2287 if (port->pdt != DP_PEER_DEVICE_NONE &&
2288 drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2289 drm_dp_mst_port_is_logical(port))
2290 port->cached_edid = drm_edid_read_ddc(port->connector,
2291 &port->aux.ddc);
2292
2293 drm_connector_dynamic_register(port->connector);
2294 return;
2295
2296 error:
2297 drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
2298 }
2299
2300 /*
2301 * Drop a topology reference, and unlink the port from the in-memory topology
2302 * layout
2303 */
2304 static void
2305 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2306 struct drm_dp_mst_port *port)
2307 {
2308 mutex_lock(&mgr->lock);
2309 port->parent->num_ports--;
2310 list_del(&port->next);
2311 mutex_unlock(&mgr->lock);
2312 drm_dp_mst_topology_put_port(port);
2313 }
2314
2315 static struct drm_dp_mst_port *
2316 drm_dp_mst_add_port(struct drm_device *dev,
2317 struct drm_dp_mst_topology_mgr *mgr,
2318 struct drm_dp_mst_branch *mstb, u8 port_number)
2319 {
2320 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2321
2322 if (!port)
2323 return NULL;
2324
2325 kref_init(&port->topology_kref);
2326 kref_init(&port->malloc_kref);
2327 port->parent = mstb;
2328 port->port_num = port_number;
2329 port->mgr = mgr;
2330 port->aux.name = "DPMST";
2331 port->aux.dev = dev->dev;
2332 port->aux.is_remote = true;
2333
2334 /* initialize the MST downstream port's AUX crc work queue */
2335 port->aux.drm_dev = dev;
2336 drm_dp_remote_aux_init(&port->aux);
2337
2338 /*
2339 * Make sure the memory allocation for our parent branch stays
2340 * around until our own memory allocation is released
2341 */
2342 drm_dp_mst_get_mstb_malloc(mstb);
2343
2344 return port;
2345 }
2346
2347 static int
2348 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2349 struct drm_device *dev,
2350 struct drm_dp_link_addr_reply_port *port_msg)
2351 {
2352 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2353 struct drm_dp_mst_port *port;
2354 int ret;
2355 u8 new_pdt = DP_PEER_DEVICE_NONE;
2356 bool new_mcs = 0;
2357 bool created = false, send_link_addr = false, changed = false;
2358
2359 port = drm_dp_get_port(mstb, port_msg->port_number);
2360 if (!port) {
2361 port = drm_dp_mst_add_port(dev, mgr, mstb,
2362 port_msg->port_number);
2363 if (!port)
2364 return -ENOMEM;
2365 created = true;
2366 changed = true;
2367 } else if (!port->input && port_msg->input_port && port->connector) {
2368 /* Since port->connector can't be changed here, we create a
2369 * new port if input_port changes from 0 to 1
2370 */
2371 drm_dp_mst_topology_unlink_port(mgr, port);
2372 drm_dp_mst_topology_put_port(port);
2373 port = drm_dp_mst_add_port(dev, mgr, mstb,
2374 port_msg->port_number);
2375 if (!port)
2376 return -ENOMEM;
2377 changed = true;
2378 created = true;
2379 } else if (port->input && !port_msg->input_port) {
2380 changed = true;
2381 } else if (port->connector) {
2382 /* We're updating a port that's exposed to userspace, so do it
2383 * under lock
2384 */
2385 drm_modeset_lock(&mgr->base.lock, NULL);
2386
2387 changed = port->ddps != port_msg->ddps ||
2388 (port->ddps &&
2389 (port->ldps != port_msg->legacy_device_plug_status ||
2390 port->dpcd_rev != port_msg->dpcd_revision ||
2391 port->mcs != port_msg->mcs ||
2392 port->pdt != port_msg->peer_device_type ||
2393 port->num_sdp_stream_sinks !=
2394 port_msg->num_sdp_stream_sinks));
2395 }
2396
2397 port->input = port_msg->input_port;
2398 if (!port->input)
2399 new_pdt = port_msg->peer_device_type;
2400 new_mcs = port_msg->mcs;
2401 port->ddps = port_msg->ddps;
2402 port->ldps = port_msg->legacy_device_plug_status;
2403 port->dpcd_rev = port_msg->dpcd_revision;
2404 port->num_sdp_streams = port_msg->num_sdp_streams;
2405 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2406
2407 /* manage mstb port lists with mgr lock - take a reference
2408 for this list */
2409 if (created) {
2410 mutex_lock(&mgr->lock);
2411 drm_dp_mst_topology_get_port(port);
2412 list_add(&port->next, &mstb->ports);
2413 mstb->num_ports++;
2414 mutex_unlock(&mgr->lock);
2415 }
2416
2417 /*
2418 * Reprobe PBN caps on both hotplug, and when re-probing the link
2419 * for our parent mstb
2420 */
2421 if (port->ddps && !port->input) {
2422 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2423 port);
2424 if (ret == 1)
2425 changed = true;
2426 } else {
2427 port->full_pbn = 0;
2428 }
2429
2430 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2431 if (ret == 1) {
2432 send_link_addr = true;
2433 } else if (ret < 0) {
2434 drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
2435 goto fail;
2436 }
2437
2438 /*
2439 * If this port wasn't just created, then we're reprobing because
2440 * we're coming out of suspend. In this case, always resend the link
2441 * address if there's an MSTB on this port
2442 */
2443 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2444 port->mcs)
2445 send_link_addr = true;
2446
2447 if (port->connector)
2448 drm_modeset_unlock(&mgr->base.lock);
2449 else if (!port->input)
2450 drm_dp_mst_port_add_connector(mstb, port);
2451
2452 if (send_link_addr && port->mstb) {
2453 ret = drm_dp_send_link_address(mgr, port->mstb);
2454 if (ret == 1) /* MSTB below us changed */
2455 changed = true;
2456 else if (ret < 0)
2457 goto fail_put;
2458 }
2459
2460 /* put reference to this port */
2461 drm_dp_mst_topology_put_port(port);
2462 return changed;
2463
2464 fail:
2465 drm_dp_mst_topology_unlink_port(mgr, port);
2466 if (port->connector)
2467 drm_modeset_unlock(&mgr->base.lock);
2468 fail_put:
2469 drm_dp_mst_topology_put_port(port);
2470 return ret;
2471 }
2472
2473 static int
2474 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2475 struct drm_dp_connection_status_notify *conn_stat)
2476 {
2477 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2478 struct drm_dp_mst_port *port;
2479 int old_ddps, ret;
2480 u8 new_pdt;
2481 bool new_mcs;
2482 bool dowork = false, create_connector = false;
2483
2484 port = drm_dp_get_port(mstb, conn_stat->port_number);
2485 if (!port)
2486 return 0;
2487
2488 if (port->connector) {
2489 if (!port->input && conn_stat->input_port) {
2490 /*
2491 * We can't remove a connector from an already exposed
2492 * port, so just throw the port out and make sure we
2493 * reprobe the link address of its parent MSTB
2494 */
2495 drm_dp_mst_topology_unlink_port(mgr, port);
2496 mstb->link_address_sent = false;
2497 dowork = true;
2498 goto out;
2499 }
2500
2501 /* Locking is only needed if the port's exposed to userspace */
2502 drm_modeset_lock(&mgr->base.lock, NULL);
2503 } else if (port->input && !conn_stat->input_port) {
2504 create_connector = true;
2505 /* Reprobe link address so we get num_sdp_streams */
2506 mstb->link_address_sent = false;
2507 dowork = true;
2508 }
2509
2510 old_ddps = port->ddps;
2511 port->input = conn_stat->input_port;
2512 port->ldps = conn_stat->legacy_device_plug_status;
2513 port->ddps = conn_stat->displayport_device_plug_status;
2514
2515 if (old_ddps != port->ddps) {
2516 if (port->ddps && !port->input)
2517 drm_dp_send_enum_path_resources(mgr, mstb, port);
2518 else
2519 port->full_pbn = 0;
2520 }
2521
2522 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2523 new_mcs = conn_stat->message_capability_status;
2524 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2525 if (ret == 1) {
2526 dowork = true;
2527 } else if (ret < 0) {
2528 drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
2529 dowork = false;
2530 }
2531
2532 if (port->connector)
2533 drm_modeset_unlock(&mgr->base.lock);
2534 else if (create_connector)
2535 drm_dp_mst_port_add_connector(mstb, port);
2536
2537 out:
2538 drm_dp_mst_topology_put_port(port);
2539 return dowork;
2540 }
2541
2542 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2543 u8 lct, u8 *rad)
2544 {
2545 struct drm_dp_mst_branch *mstb;
2546 struct drm_dp_mst_port *port;
2547 int i, ret;
2548 /* find the port by iterating down */
2549
2550 mutex_lock(&mgr->lock);
2551 mstb = mgr->mst_primary;
2552
2553 if (!mstb)
2554 goto out;
2555
2556 for (i = 1; i < lct; i++) {
2557 int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad);
2558
2559 list_for_each_entry(port, &mstb->ports, next) {
2560 if (port->port_num == port_num) {
2561 mstb = port->mstb;
2562 if (!mstb) {
2563 drm_err(mgr->dev,
2564 "failed to lookup MSTB with lct %d, rad %02x\n",
2565 lct, rad[0]);
2566 goto out;
2567 }
2568
2569 break;
2570 }
2571 }
2572 }
2573 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2574 if (!ret)
2575 mstb = NULL;
2576 out:
2577 mutex_unlock(&mgr->lock);
2578 return mstb;
2579 }
2580
2581 static struct drm_dp_mst_branch *
2582 get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb,
2583 const guid_t *guid)
2584 {
2585 struct drm_dp_mst_branch *found_mstb;
2586 struct drm_dp_mst_port *port;
2587
2588 if (!mstb)
2589 return NULL;
2590
2591 if (guid_equal(&mstb->guid, guid))
2592 return mstb;
2593
2594 list_for_each_entry(port, &mstb->ports, next) {
2595 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2596
2597 if (found_mstb)
2598 return found_mstb;
2599 }
2600
2601 return NULL;
2602 }
2603
2604 static struct drm_dp_mst_branch *
2605 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2606 const guid_t *guid)
2607 {
2608 struct drm_dp_mst_branch *mstb;
2609 int ret;
2610
2611 /* find the port by iterating down */
2612 mutex_lock(&mgr->lock);
2613
2614 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2615 if (mstb) {
2616 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2617 if (!ret)
2618 mstb = NULL;
2619 }
2620
2621 mutex_unlock(&mgr->lock);
2622 return mstb;
2623 }
2624
2625 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2626 struct drm_dp_mst_branch *mstb)
2627 {
2628 struct drm_dp_mst_port *port;
2629 int ret;
2630 bool changed = false;
2631
2632 if (!mstb->link_address_sent) {
2633 ret = drm_dp_send_link_address(mgr, mstb);
2634 if (ret == 1)
2635 changed = true;
2636 else if (ret < 0)
2637 return ret;
2638 }
2639
2640 list_for_each_entry(port, &mstb->ports, next) {
2641 if (port->input || !port->ddps || !port->mstb)
2642 continue;
2643
2644 ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
2645 if (ret == 1)
2646 changed = true;
2647 else if (ret < 0)
2648 return ret;
2649 }
2650
2651 return changed;
2652 }
2653
2654 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2655 {
2656 struct drm_dp_mst_topology_mgr *mgr =
2657 container_of(work, struct drm_dp_mst_topology_mgr, work);
2658 struct drm_device *dev = mgr->dev;
2659 struct drm_dp_mst_branch *mstb;
2660 int ret;
2661 bool clear_payload_id_table;
2662
2663 mutex_lock(&mgr->probe_lock);
2664
2665 mutex_lock(&mgr->lock);
2666 clear_payload_id_table = !mgr->payload_id_table_cleared;
2667 mgr->payload_id_table_cleared = true;
2668
2669 mstb = mgr->mst_primary;
2670 if (mstb) {
2671 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2672 if (!ret)
2673 mstb = NULL;
2674 }
2675 mutex_unlock(&mgr->lock);
2676 if (!mstb) {
2677 mutex_unlock(&mgr->probe_lock);
2678 return;
2679 }
2680
2681 /*
2682 * Certain branch devices seem to incorrectly report an available_pbn
2683 * of 0 on downstream sinks, even after clearing the
2684 * DP_PAYLOAD_ALLOCATE_* registers in
2685 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2686 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2687 * things work again.
2688 */
2689 if (clear_payload_id_table) {
2690 drm_dbg_kms(dev, "Clearing payload ID table\n");
2691 drm_dp_send_clear_payload_id_table(mgr, mstb);
2692 }
2693
2694 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2695 drm_dp_mst_topology_put_mstb(mstb);
2696
2697 mutex_unlock(&mgr->probe_lock);
2698 if (ret > 0)
2699 drm_kms_helper_hotplug_event(dev);
2700 }
2701
2702 static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr)
2703 {
2704 queue_work(system_long_wq, &mgr->work);
2705 }
2706
2707 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2708 guid_t *guid)
2709 {
2710 if (!guid_is_null(guid))
2711 return true;
2712
2713 guid_gen(guid);
2714
2715 return false;
2716 }
2717
2718 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2719 u8 port_num, u32 offset, u8 num_bytes)
2720 {
2721 struct drm_dp_sideband_msg_req_body req;
2722
2723 req.req_type = DP_REMOTE_DPCD_READ;
2724 req.u.dpcd_read.port_number = port_num;
2725 req.u.dpcd_read.dpcd_address = offset;
2726 req.u.dpcd_read.num_bytes = num_bytes;
2727 drm_dp_encode_sideband_req(&req, msg);
2728 }
2729
2730 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2731 bool up, u8 *msg, int len)
2732 {
2733 int ret;
2734 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2735 int tosend, total, offset;
2736 int retries = 0;
2737
2738 retry:
2739 total = len;
2740 offset = 0;
2741 do {
2742 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2743
2744 ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset,
2745 &msg[offset],
2746 tosend);
2747 if (ret == -EIO && retries < 5) {
2748 retries++;
2749 goto retry;
2750 } else if (ret < 0) {
2751 drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
2752
2753 return -EIO;
2754 }
2755 offset += tosend;
2756 total -= tosend;
2757 } while (total > 0);
2758 return 0;
2759 }
2760
2761 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2762 struct drm_dp_sideband_msg_tx *txmsg)
2763 {
2764 struct drm_dp_mst_branch *mstb = txmsg->dst;
2765 u8 req_type;
2766
2767 req_type = txmsg->msg[0] & 0x7f;
2768 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2769 req_type == DP_RESOURCE_STATUS_NOTIFY ||
2770 req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
2771 hdr->broadcast = 1;
2772 else
2773 hdr->broadcast = 0;
2774 hdr->path_msg = txmsg->path_msg;
2775 if (hdr->broadcast) {
2776 hdr->lct = 1;
2777 hdr->lcr = 6;
2778 } else {
2779 hdr->lct = mstb->lct;
2780 hdr->lcr = mstb->lct - 1;
2781 }
2782
2783 memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2784
2785 return 0;
2786 }
2787 /*
2788 * process a single block of the next message in the sideband queue
2789 */
2790 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2791 struct drm_dp_sideband_msg_tx *txmsg,
2792 bool up)
2793 {
2794 u8 chunk[48];
2795 struct drm_dp_sideband_msg_hdr hdr;
2796 int len, space, idx, tosend;
2797 int ret;
2798
2799 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2800 return 0;
2801
2802 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2803
2804 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2805 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2806
2807 /* make hdr from dst mst */
2808 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2809 if (ret < 0)
2810 return ret;
2811
2812 /* amount left to send in this message */
2813 len = txmsg->cur_len - txmsg->cur_offset;
2814
2815 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2816 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2817
2818 tosend = min(len, space);
2819 if (len == txmsg->cur_len)
2820 hdr.somt = 1;
2821 if (space >= len)
2822 hdr.eomt = 1;
2823
2824
2825 hdr.msg_len = tosend + 1;
2826 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2827 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2828 /* add crc at end */
2829 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2830 idx += tosend + 1;
2831
2832 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2833 if (ret) {
2834 if (drm_debug_enabled(DRM_UT_DP)) {
2835 struct drm_printer p = drm_dbg_printer(mgr->dev,
2836 DRM_UT_DP,
2837 DBG_PREFIX);
2838
2839 drm_printf(&p, "sideband msg failed to send\n");
2840 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2841 }
2842 return ret;
2843 }
2844
2845 txmsg->cur_offset += tosend;
2846 if (txmsg->cur_offset == txmsg->cur_len) {
2847 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2848 return 1;
2849 }
2850 return 0;
2851 }
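/*
 * Worked example: for a destination directly below the primary branch
 * (lct 1) the encoded header is 3 bytes, so a 48 byte sideband chunk carries
 * at most 48 - 1 (CRC) - 3 = 44 bytes of message body. A message no longer
 * than that goes out as a single chunk with both somt and eomt set; anything
 * longer is sent 44 bytes at a time.
 */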
2852
2853 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2854 {
2855 struct drm_dp_sideband_msg_tx *txmsg;
2856 int ret;
2857
2858 WARN_ON(!mutex_is_locked(&mgr->qlock));
2859
2860 /* construct a chunk from the first msg in the tx_msg queue */
2861 if (list_empty(&mgr->tx_msg_downq))
2862 return;
2863
2864 txmsg = list_first_entry(&mgr->tx_msg_downq,
2865 struct drm_dp_sideband_msg_tx, next);
2866 ret = process_single_tx_qlock(mgr, txmsg, false);
2867 if (ret < 0) {
2868 drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
2869 list_del(&txmsg->next);
2870 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2871 wake_up_all(&mgr->tx_waitq);
2872 }
2873 }
2874
2875 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2876 struct drm_dp_sideband_msg_tx *txmsg)
2877 {
2878 mutex_lock(&mgr->qlock);
2879 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2880
2881 if (drm_debug_enabled(DRM_UT_DP)) {
2882 struct drm_printer p = drm_dbg_printer(mgr->dev, DRM_UT_DP,
2883 DBG_PREFIX);
2884
2885 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2886 }
2887
2888 if (list_is_singular(&mgr->tx_msg_downq))
2889 process_single_down_tx_qlock(mgr);
2890 mutex_unlock(&mgr->qlock);
2891 }
2892
2893 static void
2894 drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
2895 struct drm_dp_link_address_ack_reply *reply)
2896 {
2897 struct drm_dp_link_addr_reply_port *port_reply;
2898 int i;
2899
2900 for (i = 0; i < reply->nports; i++) {
2901 port_reply = &reply->ports[i];
2902 drm_dbg_kms(mgr->dev,
2903 "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2904 i,
2905 port_reply->input_port,
2906 port_reply->peer_device_type,
2907 port_reply->port_number,
2908 port_reply->dpcd_revision,
2909 port_reply->mcs,
2910 port_reply->ddps,
2911 port_reply->legacy_device_plug_status,
2912 port_reply->num_sdp_streams,
2913 port_reply->num_sdp_stream_sinks);
2914 }
2915 }
2916
2917 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2918 struct drm_dp_mst_branch *mstb)
2919 {
2920 struct drm_dp_sideband_msg_tx *txmsg;
2921 struct drm_dp_link_address_ack_reply *reply;
2922 struct drm_dp_mst_port *port, *tmp;
2923 int i, ret, port_mask = 0;
2924 bool changed = false;
2925
2926 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2927 if (!txmsg)
2928 return -ENOMEM;
2929
2930 txmsg->dst = mstb;
2931 build_link_address(txmsg);
2932
2933 mstb->link_address_sent = true;
2934 drm_dp_queue_down_tx(mgr, txmsg);
2935
2936 /* FIXME: Actually do some real error handling here */
2937 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2938 if (ret < 0) {
2939 drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
2940 goto out;
2941 }
2942 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2943 drm_err(mgr->dev, "link address NAK received\n");
2944 ret = -EIO;
2945 goto out;
2946 }
2947
2948 reply = &txmsg->reply.u.link_addr;
2949 drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
2950 drm_dp_dump_link_address(mgr, reply);
2951
2952 ret = drm_dp_check_mstb_guid(mstb, &reply->guid);
2953 if (ret) {
2954 char buf[64];
2955
2956 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2957 drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
2958 goto out;
2959 }
2960
2961 for (i = 0; i < reply->nports; i++) {
2962 port_mask |= BIT(reply->ports[i].port_number);
2963 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2964 &reply->ports[i]);
2965 if (ret == 1)
2966 changed = true;
2967 else if (ret < 0)
2968 goto out;
2969 }
2970
2971 /* Prune any ports that are currently a part of mstb in our in-memory
2972 * topology, but were not seen in this link address. Usually this
2973 * means that they were removed while the topology was out of sync,
2974 * e.g. during suspend/resume
2975 */
2976 mutex_lock(&mgr->lock);
2977 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2978 if (port_mask & BIT(port->port_num))
2979 continue;
2980
2981 drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
2982 port->port_num);
2983 list_del(&port->next);
2984 drm_dp_mst_topology_put_port(port);
2985 changed = true;
2986 }
2987 mutex_unlock(&mgr->lock);
2988
2989 out:
2990 if (ret < 0)
2991 mstb->link_address_sent = false;
2992 kfree(txmsg);
2993 return ret < 0 ? ret : changed;
2994 }
2995
2996 static void
2997 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2998 struct drm_dp_mst_branch *mstb)
2999 {
3000 struct drm_dp_sideband_msg_tx *txmsg;
3001 int ret;
3002
3003 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3004 if (!txmsg)
3005 return;
3006
3007 txmsg->dst = mstb;
3008 build_clear_payload_id_table(txmsg);
3009
3010 drm_dp_queue_down_tx(mgr, txmsg);
3011
3012 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3013 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3014 drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
3015
3016 kfree(txmsg);
3017 }
3018
3019 static int
3020 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3021 struct drm_dp_mst_branch *mstb,
3022 struct drm_dp_mst_port *port)
3023 {
3024 struct drm_dp_enum_path_resources_ack_reply *path_res;
3025 struct drm_dp_sideband_msg_tx *txmsg;
3026 int ret;
3027
3028 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3029 if (!txmsg)
3030 return -ENOMEM;
3031
3032 txmsg->dst = mstb;
3033 build_enum_path_resources(txmsg, port->port_num);
3034
3035 drm_dp_queue_down_tx(mgr, txmsg);
3036
3037 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3038 if (ret > 0) {
3039 ret = 0;
3040 path_res = &txmsg->reply.u.path_resources;
3041
3042 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3043 drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
3044 } else {
3045 if (port->port_num != path_res->port_number)
3046 DRM_ERROR("got incorrect port in response\n");
3047
3048 drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
3049 path_res->port_number,
3050 path_res->full_payload_bw_number,
3051 path_res->avail_payload_bw_number);
3052
3053 /*
3054 * If something changed, make sure we send a
3055 * hotplug
3056 */
3057 if (port->full_pbn != path_res->full_payload_bw_number ||
3058 port->fec_capable != path_res->fec_capable)
3059 ret = 1;
3060
3061 port->full_pbn = path_res->full_payload_bw_number;
3062 port->fec_capable = path_res->fec_capable;
3063 }
3064 }
3065
3066 kfree(txmsg);
3067 return ret;
3068 }
3069
3070 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3071 {
3072 if (!mstb->port_parent)
3073 return NULL;
3074
3075 if (mstb->port_parent->mstb != mstb)
3076 return mstb->port_parent;
3077
3078 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3079 }
3080
3081 /*
3082 * Searches upwards in the topology starting from mstb to try to find the
3083 * closest available parent of mstb that's still connected to the rest of the
3084 * topology. This can be used in order to perform operations like releasing
3085 * payloads, where the branch device which owned the payload may no longer be
3086 * around and thus would require that the payload on the last living relative
3087 * be freed instead.
3088 */
3089 static struct drm_dp_mst_branch *
3090 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3091 struct drm_dp_mst_branch *mstb,
3092 int *port_num)
3093 {
3094 struct drm_dp_mst_branch *rmstb = NULL;
3095 struct drm_dp_mst_port *found_port;
3096
3097 mutex_lock(&mgr->lock);
3098 if (!mgr->mst_primary)
3099 goto out;
3100
3101 do {
3102 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3103 if (!found_port)
3104 break;
3105
3106 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3107 rmstb = found_port->parent;
3108 *port_num = found_port->port_num;
3109 } else {
3110 /* Search again, starting from this parent */
3111 mstb = found_port->parent;
3112 }
3113 } while (!rmstb);
3114 out:
3115 mutex_unlock(&mgr->lock);
3116 return rmstb;
3117 }
3118
3119 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3120 struct drm_dp_mst_port *port,
3121 int id,
3122 int pbn)
3123 {
3124 struct drm_dp_sideband_msg_tx *txmsg;
3125 struct drm_dp_mst_branch *mstb;
3126 int ret, port_num;
3127 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3128 int i;
3129
3130 port_num = port->port_num;
3131 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3132 if (!mstb) {
3133 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3134 port->parent,
3135 &port_num);
3136
3137 if (!mstb)
3138 return -EINVAL;
3139 }
3140
3141 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3142 if (!txmsg) {
3143 ret = -ENOMEM;
3144 goto fail_put;
3145 }
3146
3147 for (i = 0; i < port->num_sdp_streams; i++)
3148 sinks[i] = i;
3149
3150 txmsg->dst = mstb;
3151 build_allocate_payload(txmsg, port_num,
3152 id,
3153 pbn, port->num_sdp_streams, sinks);
3154
3155 drm_dp_queue_down_tx(mgr, txmsg);
3156
3157 /*
3158 * FIXME: there is a small chance that between getting the last
3159 * connected mstb and sending the payload message, the last connected
3160 * mstb could also be removed from the topology. In the future, this
3161 * needs to be fixed by restarting the
3162 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3163 * timeout if the topology is still connected to the system.
3164 */
3165 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3166 if (ret > 0) {
3167 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3168 ret = -EINVAL;
3169 else
3170 ret = 0;
3171 }
3172 kfree(txmsg);
3173 fail_put:
3174 drm_dp_mst_topology_put_mstb(mstb);
3175 return ret;
3176 }
3177
3178 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3179 struct drm_dp_mst_port *port, bool power_up)
3180 {
3181 struct drm_dp_sideband_msg_tx *txmsg;
3182 int ret;
3183
3184 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3185 if (!port)
3186 return -EINVAL;
3187
3188 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3189 if (!txmsg) {
3190 drm_dp_mst_topology_put_port(port);
3191 return -ENOMEM;
3192 }
3193
3194 txmsg->dst = port->parent;
3195 build_power_updown_phy(txmsg, port->port_num, power_up);
3196 drm_dp_queue_down_tx(mgr, txmsg);
3197
3198 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3199 if (ret > 0) {
3200 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3201 ret = -EINVAL;
3202 else
3203 ret = 0;
3204 }
3205 kfree(txmsg);
3206 drm_dp_mst_topology_put_port(port);
3207
3208 return ret;
3209 }
3210 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
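/*
 * Usage sketch (illustrative only): a driver would typically power the remote
 * DPRX up before enabling a stream on the port and power it back down again
 * when disabling it:
 *
 *	drm_dp_send_power_updown_phy(mgr, port, true);	(enable path)
 *	...
 *	drm_dp_send_power_updown_phy(mgr, port, false);	(disable path)
 */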
3211
3212 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3213 struct drm_dp_mst_port *port,
3214 struct drm_dp_query_stream_enc_status_ack_reply *status)
3215 {
3216 struct drm_dp_mst_topology_state *state;
3217 struct drm_dp_mst_atomic_payload *payload;
3218 struct drm_dp_sideband_msg_tx *txmsg;
3219 u8 nonce[7];
3220 int ret;
3221
3222 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3223 if (!txmsg)
3224 return -ENOMEM;
3225
3226 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3227 if (!port) {
3228 ret = -EINVAL;
3229 goto out_get_port;
3230 }
3231
3232 get_random_bytes(nonce, sizeof(nonce));
3233
3234 drm_modeset_lock(&mgr->base.lock, NULL);
3235 state = to_drm_dp_mst_topology_state(mgr->base.state);
3236 payload = drm_atomic_get_mst_payload_state(state, port);
3237
3238 /*
3239 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3240 * transaction at the MST Branch device directly connected to the
3241 * Source"
3242 */
3243 txmsg->dst = mgr->mst_primary;
3244
3245 build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
3246
3247 drm_dp_queue_down_tx(mgr, txmsg);
3248
3249 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3250 if (ret < 0) {
3251 goto out;
3252 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3253 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3254 ret = -ENXIO;
3255 goto out;
3256 }
3257
3258 ret = 0;
3259 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3260
3261 out:
3262 drm_modeset_unlock(&mgr->base.lock);
3263 drm_dp_mst_topology_put_port(port);
3264 out_get_port:
3265 kfree(txmsg);
3266 return ret;
3267 }
3268 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3269
3270 static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
3271 struct drm_dp_mst_atomic_payload *payload)
3272 {
3273 return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
3274 payload->time_slots);
3275 }
3276
3277 static int drm_dp_create_payload_to_remote(struct drm_dp_mst_topology_mgr *mgr,
3278 struct drm_dp_mst_atomic_payload *payload)
3279 {
3280 int ret;
3281 struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3282
3283 if (!port)
3284 return -EIO;
3285
3286 ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
3287 drm_dp_mst_topology_put_port(port);
3288 return ret;
3289 }
3290
3291 static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_mgr *mgr,
3292 struct drm_dp_mst_topology_state *mst_state,
3293 struct drm_dp_mst_atomic_payload *payload)
3294 {
3295 drm_dbg_kms(mgr->dev, "\n");
3296
3297 /* it's okay for these to fail */
3298 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) {
3299 drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
3300 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
3301 }
3302
3303 if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
3304 drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
3305 }
3306
3307 /**
3308 * drm_dp_add_payload_part1() - Execute payload update part 1
3309 * @mgr: Manager to use.
3310 * @mst_state: The MST atomic state
3311 * @payload: The payload to write
3312 *
3313 * Determines the starting time slot for the given payload, and programs the VCPI for this payload
3314 * into the DPCD of DPRX. After calling this, the driver should generate ACT and payload packets.
3315 *
3316 * Returns: 0 on success, error code on failure.
3317 */
3318 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3319 struct drm_dp_mst_topology_state *mst_state,
3320 struct drm_dp_mst_atomic_payload *payload)
3321 {
3322 struct drm_dp_mst_port *port;
3323 int ret;
3324
3325 /* Update mst mgr info */
3326 if (mgr->payload_count == 0)
3327 mgr->next_start_slot = mst_state->start_slot;
3328
3329 payload->vc_start_slot = mgr->next_start_slot;
3330
3331 mgr->payload_count++;
3332 mgr->next_start_slot += payload->time_slots;
3333
3334 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
3335
3336 /* Allocate payload to immediate downstream facing port */
3337 port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
3338 if (!port) {
3339 drm_dbg_kms(mgr->dev,
3340 "VCPI %d for port %p not in topology, not creating a payload to remote\n",
3341 payload->vcpi, payload->port);
3342 return -EIO;
3343 }
3344
3345 ret = drm_dp_create_payload_at_dfp(mgr, payload);
3346 if (ret < 0) {
3347 drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n",
3348 payload->port, ret);
3349 goto put_port;
3350 }
3351
3352 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
3353
3354 put_port:
3355 drm_dp_mst_topology_put_port(port);
3356
3357 return ret;
3358 }
3359 EXPORT_SYMBOL(drm_dp_add_payload_part1);
3360
3361 /**
3362 * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel
3363 * @mgr: Manager to use.
3364 * @mst_state: The MST atomic state
3365 * @payload: The payload to remove
3366 *
3367 * Removes a payload along the virtual channel if it was successfully allocated.
3368 * After calling this, the driver should set the HW to generate ACT and then switch to the new
3369 * payload allocation state.
3370 */
3371 void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
3372 struct drm_dp_mst_topology_state *mst_state,
3373 struct drm_dp_mst_atomic_payload *payload)
3374 {
3375 /* Remove remote payload allocation */
3376 bool send_remove = false;
3377
3378 mutex_lock(&mgr->lock);
3379 send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
3380 mutex_unlock(&mgr->lock);
3381
3382 if (send_remove)
3383 drm_dp_destroy_payload_at_remote_and_dfp(mgr, mst_state, payload);
3384 else
3385 drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
3386 payload->vcpi);
3387
3388 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
3389 }
3390 EXPORT_SYMBOL(drm_dp_remove_payload_part1);
3391
3392 /**
3393 * drm_dp_remove_payload_part2() - Remove an MST payload locally
3394 * @mgr: Manager to use.
3395 * @mst_state: The MST atomic state
3396 * @old_payload: The payload with its old state
3397 * @new_payload: The payload with its latest state
3398 *
3399 * Updates the starting time slots of all other payloads which would have been shifted towards
3400 * the start of the payload ID table as a result of removing a payload. The driver should call this
3401 * function whenever it removes a payload from its HW. It's independent of the result of payload
3402 * allocation/deallocation at branch devices along the virtual channel.
3403 */
3404 void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3405 struct drm_dp_mst_topology_state *mst_state,
3406 const struct drm_dp_mst_atomic_payload *old_payload,
3407 struct drm_dp_mst_atomic_payload *new_payload)
3408 {
3409 struct drm_dp_mst_atomic_payload *pos;
3410
3411 /* Remove local payload allocation */
3412 list_for_each_entry(pos, &mst_state->payloads, next) {
3413 if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
3414 pos->vc_start_slot -= old_payload->time_slots;
3415 }
3416 new_payload->vc_start_slot = -1;
3417
3418 mgr->payload_count--;
3419 mgr->next_start_slot -= old_payload->time_slots;
3420
3421 if (new_payload->delete)
3422 drm_dp_mst_put_port_malloc(new_payload->port);
3423
3424 new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
3425 }
3426 EXPORT_SYMBOL(drm_dp_remove_payload_part2);
3427 /**
3428 * drm_dp_add_payload_part2() - Execute payload update part 2
3429 * @mgr: Manager to use.
3430 * @payload: The payload to update
3431 *
3432 * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
3433 * function will send the sideband messages to finish allocating this payload.
3434 *
3435 * Returns: 0 on success, negative error code on failure.
3436 */
3437 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
3438 struct drm_dp_mst_atomic_payload *payload)
3439 {
3440 int ret = 0;
3441
3442 /* Skip failed payloads */
3443 if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
3444 drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
3445 payload->port->connector->name);
3446 return -EIO;
3447 }
3448
3449 /* Allocate payload to remote end */
3450 ret = drm_dp_create_payload_to_remote(mgr, payload);
3451 if (ret < 0)
3452 drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
3453 payload->port, ret);
3454 else
3455 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE;
3456
3457 return ret;
3458 }
3459 EXPORT_SYMBOL(drm_dp_add_payload_part2);
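
/*
 * Illustrative usage sketch (not part of the original file): a driver's
 * commit path is expected to program payloads in two steps, roughly as
 * follows. Names such as "state" and "port" stand for whatever the driver
 * has at hand; error handling is omitted.
 *
 *	struct drm_dp_mst_topology_state *mst_state =
 *		drm_atomic_get_new_mst_topology_state(state, mgr);
 *	struct drm_dp_mst_atomic_payload *payload =
 *		drm_atomic_get_mst_payload_state(mst_state, port);
 *
 *	drm_dp_add_payload_part1(mgr, mst_state, payload);
 *	// ...enable the stream so the hardware generates ACT...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_add_payload_part2(mgr, payload);
 *
 * Removal mirrors this: drm_dp_remove_payload_part1() before the stream is
 * shut off, drm_dp_remove_payload_part2() once the hardware no longer
 * transmits the payload.
 */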
3460
3461 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3462 struct drm_dp_mst_port *port,
3463 int offset, int size, u8 *bytes)
3464 {
3465 int ret = 0;
3466 struct drm_dp_sideband_msg_tx *txmsg;
3467 struct drm_dp_mst_branch *mstb;
3468
3469 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3470 if (!mstb)
3471 return -EINVAL;
3472
3473 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3474 if (!txmsg) {
3475 ret = -ENOMEM;
3476 goto fail_put;
3477 }
3478
3479 build_dpcd_read(txmsg, port->port_num, offset, size);
3480 txmsg->dst = port->parent;
3481
3482 drm_dp_queue_down_tx(mgr, txmsg);
3483
3484 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3485 if (ret < 0)
3486 goto fail_free;
3487
3488 if (txmsg->reply.reply_type == 1) {
3489 drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3490 mstb, port->port_num, offset, size);
3491 ret = -EIO;
3492 goto fail_free;
3493 }
3494
3495 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3496 ret = -EPROTO;
3497 goto fail_free;
3498 }
3499
3500 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3501 size);
3502 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3503
3504 fail_free:
3505 kfree(txmsg);
3506 fail_put:
3507 drm_dp_mst_topology_put_mstb(mstb);
3508
3509 return ret;
3510 }
3511
3512 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3513 struct drm_dp_mst_port *port,
3514 int offset, int size, u8 *bytes)
3515 {
3516 int ret;
3517 struct drm_dp_sideband_msg_tx *txmsg;
3518 struct drm_dp_mst_branch *mstb;
3519
3520 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3521 if (!mstb)
3522 return -EINVAL;
3523
3524 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3525 if (!txmsg) {
3526 ret = -ENOMEM;
3527 goto fail_put;
3528 }
3529
3530 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3531 txmsg->dst = mstb;
3532
3533 drm_dp_queue_down_tx(mgr, txmsg);
3534
3535 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3536 if (ret > 0) {
3537 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3538 ret = -EIO;
3539 else
3540 ret = size;
3541 }
3542
3543 kfree(txmsg);
3544 fail_put:
3545 drm_dp_mst_topology_put_mstb(mstb);
3546 return ret;
3547 }
3548
3549 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3550 {
3551 struct drm_dp_sideband_msg_reply_body reply;
3552
3553 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3554 reply.req_type = req_type;
3555 drm_dp_encode_sideband_reply(&reply, msg);
3556 return 0;
3557 }
3558
3559 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3560 struct drm_dp_mst_branch *mstb,
3561 int req_type, bool broadcast)
3562 {
3563 struct drm_dp_sideband_msg_tx *txmsg;
3564
3565 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3566 if (!txmsg)
3567 return -ENOMEM;
3568
3569 txmsg->dst = mstb;
3570 drm_dp_encode_up_ack_reply(txmsg, req_type);
3571
3572 mutex_lock(&mgr->qlock);
3573 /* construct a chunk from the first msg in the tx_msg queue */
3574 process_single_tx_qlock(mgr, txmsg, true);
3575 mutex_unlock(&mgr->qlock);
3576
3577 kfree(txmsg);
3578 return 0;
3579 }
3580
3581 /**
3582 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
3583 * @link_rate: link rate in 10kbits/s units
3584 * @link_lane_count: lane count
3585 *
3586 * Calculate the total bandwidth of a MultiStream Transport link. The returned
3587 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
3588 * convert the number of PBNs required for a given stream to the number of
3589 * timeslots this stream requires in each MTP.
3590 *
3591 * Returns the BW / timeslot value in 20.12 fixed point format.
3592 */
3593 fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
3594 {
3595 int ch_coding_efficiency =
3596 drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
3597 fixed20_12 ret;
3598
3599 /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
3600 ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
3601 ch_coding_efficiency),
3602 (1000000ULL * 8 * 5400) >> 12);
3603
3604 return ret;
3605 }
3606 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
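
/*
 * Worked example (illustrative, not from the original source): the value
 * returned here is typically stored as the topology state's pbn_div and
 * used to convert a stream's PBN requirement into time slots per MTP,
 * mirroring what drm_dp_atomic_find_time_slots() does below:
 *
 *	fixed20_12 bw = drm_dp_get_vc_payload_bw(link_rate, lane_count);
 *	int slots = DIV_ROUND_UP(dfixed_const(pbn), bw.full);
 */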
3607
3608 /**
3609 * drm_dp_read_mst_cap() - Read the sink's MST mode capability
3610 * @aux: The DP AUX channel to use
3611 * @dpcd: A cached copy of the DPCD capabilities for this sink
3612 *
3613 * Returns: enum drm_dp_mst_mode to indicate MST mode capability
3614 */
3615 enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3616 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3617 {
3618 u8 mstm_cap;
3619
3620 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3621 return DRM_DP_SST;
3622
3623 if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0)
3624 return DRM_DP_SST;
3625
3626 if (mstm_cap & DP_MST_CAP)
3627 return DRM_DP_MST;
3628
3629 if (mstm_cap & DP_SINGLE_STREAM_SIDEBAND_MSG)
3630 return DRM_DP_SST_SIDEBAND_MSG;
3631
3632 return DRM_DP_SST;
3633 }
3634 EXPORT_SYMBOL(drm_dp_read_mst_cap);
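
/*
 * Illustrative detection sketch (an assumption about typical driver usage,
 * not code from this file): on hotplug a driver can read the DPCD, query
 * the MST capability and flip the topology manager accordingly.
 *
 *	u8 dpcd[DP_RECEIVER_CAP_SIZE];
 *
 *	if (drm_dp_read_dpcd_caps(aux, dpcd) == 0 &&
 *	    drm_dp_read_mst_cap(aux, dpcd) == DRM_DP_MST)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */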
3635
3636 /**
3637 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3638 * @mgr: manager to set state for
3639 * @mst_state: true to enable MST on this connector - false to disable.
3640 *
3641 * This is called by the driver when it detects an MST capable device plugged
3642 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3643 */
3644 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3645 {
3646 int ret = 0;
3647 struct drm_dp_mst_branch *mstb = NULL;
3648
3649 mutex_lock(&mgr->lock);
3650 if (mst_state == mgr->mst_state)
3651 goto out_unlock;
3652
3653 mgr->mst_state = mst_state;
3654 /* set the device into MST mode */
3655 if (mst_state) {
3656 WARN_ON(mgr->mst_primary);
3657
3658 /* get dpcd info */
3659 ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
3660 if (ret < 0) {
3661 drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
3662 mgr->aux->name, ret);
3663 goto out_unlock;
3664 }
3665
3666 /* add initial branch device at LCT 1 */
3667 mstb = drm_dp_add_mst_branch_device(1, NULL);
3668 if (mstb == NULL) {
3669 ret = -ENOMEM;
3670 goto out_unlock;
3671 }
3672 mstb->mgr = mgr;
3673
3674 /* give this the main reference */
3675 mgr->mst_primary = mstb;
3676 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3677
3678 ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
3679 DP_MST_EN |
3680 DP_UP_REQ_EN |
3681 DP_UPSTREAM_IS_SRC);
3682 if (ret < 0)
3683 goto out_unlock;
3684
3685 /* Write reset payload */
3686 drm_dp_dpcd_clear_payload(mgr->aux);
3687
3688 drm_dp_mst_queue_probe_work(mgr);
3689
3690 ret = 0;
3691 } else {
3692 /* disable MST on the device */
3693 mstb = mgr->mst_primary;
3694 mgr->mst_primary = NULL;
3695 /* this can fail if the device is gone */
3696 drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0);
3697 ret = 0;
3698 mgr->payload_id_table_cleared = false;
3699
3700 mgr->reset_rx_state = true;
3701 }
3702
3703 out_unlock:
3704 mutex_unlock(&mgr->lock);
3705 if (mstb)
3706 drm_dp_mst_topology_put_mstb(mstb);
3707 return ret;
3708
3709 }
3710 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3711
3712 static void
3713 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3714 {
3715 struct drm_dp_mst_port *port;
3716
3717 /* The link address will need to be re-sent on resume */
3718 mstb->link_address_sent = false;
3719
3720 list_for_each_entry(port, &mstb->ports, next)
3721 if (port->mstb)
3722 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3723 }
3724
3725 /**
3726 * drm_dp_mst_topology_queue_probe - Queue a topology probe
3727 * @mgr: manager to probe
3728 *
3729 * Queue a work to probe the MST topology. Drivers should call this only to
3730 * sync the topology's HW->SW state after the MST link's parameters have
3731 * changed in a way that could have left the state out of sync. This is the case
3732 * for instance when the link rate between the source and first downstream
3733 * branch device has switched between UHBR and non-UHBR rates. Except for those
3734 * cases - for instance when a sink gets plugged/unplugged to/from a port - the SW
3735 * state will get updated automatically via MST UP message notifications.
3736 */
3737 void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr)
3738 {
3739 mutex_lock(&mgr->lock);
3740
3741 if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary))
3742 goto out_unlock;
3743
3744 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3745 drm_dp_mst_queue_probe_work(mgr);
3746
3747 out_unlock:
3748 mutex_unlock(&mgr->lock);
3749 }
3750 EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe);
3751
3752 /**
3753 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3754 * @mgr: manager to suspend
3755 *
3756 * This function tells the MST device that we can't handle UP messages
3757 * anymore. This should stop it from sending any since we are suspended.
3758 */
3759 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3760 {
3761 mutex_lock(&mgr->lock);
3762 drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
3763 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3764 mutex_unlock(&mgr->lock);
3765 flush_work(&mgr->up_req_work);
3766 flush_work(&mgr->work);
3767 flush_work(&mgr->delayed_destroy_work);
3768
3769 mutex_lock(&mgr->lock);
3770 if (mgr->mst_state && mgr->mst_primary)
3771 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3772 mutex_unlock(&mgr->lock);
3773 }
3774 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3775
3776 /**
3777 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3778 * @mgr: manager to resume
3779 * @sync: whether or not to perform topology reprobing synchronously
3780 *
3781 * This will fetch the DPCD and check whether the device is still there;
3782 * if it is, it will rewrite the MSTM control bits and return.
3783 *
3784 * If the device check fails, this returns -1 and the driver should do
3785 * a full MST reprobe, in case we were undocked.
3786 *
3787 * During system resume (where it is assumed that the driver will be calling
3788 * drm_atomic_helper_resume()) this function should be called beforehand with
3789 * @sync set to true. In contexts like runtime resume where the driver is not
3790 * expected to be calling drm_atomic_helper_resume(), this function should be
3791 * called with @sync set to false in order to avoid deadlocking.
3792 *
3793 * Returns: -1 if the MST topology was removed while we were suspended, 0
3794 * otherwise.
3795 */
3796 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3797 bool sync)
3798 {
3799 u8 buf[UUID_SIZE];
3800 guid_t guid;
3801 int ret;
3802
3803 mutex_lock(&mgr->lock);
3804 if (!mgr->mst_primary)
3805 goto out_fail;
3806
3807 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
3808 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3809 goto out_fail;
3810 }
3811
3812 ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL,
3813 DP_MST_EN |
3814 DP_UP_REQ_EN |
3815 DP_UPSTREAM_IS_SRC);
3816 if (ret < 0) {
3817 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
3818 goto out_fail;
3819 }
3820
3821 /* Some hubs forget their guids after they resume */
3822 ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf));
3823 if (ret < 0) {
3824 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
3825 goto out_fail;
3826 }
3827
3828 import_guid(&guid, buf);
3829
3830 ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid);
3831 if (ret) {
3832 drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
3833 goto out_fail;
3834 }
3835
3836 /*
3837 * For the final step of resuming the topology, we need to bring the
3838 * state of our in-memory topology back into sync with reality. So,
3839 * restart the probing process as if we're probing a new hub
3840 */
3841 drm_dp_mst_queue_probe_work(mgr);
3842 mutex_unlock(&mgr->lock);
3843
3844 if (sync) {
3845 drm_dbg_kms(mgr->dev,
3846 "Waiting for link probe work to finish re-syncing topology...\n");
3847 flush_work(&mgr->work);
3848 }
3849
3850 return 0;
3851
3852 out_fail:
3853 mutex_unlock(&mgr->lock);
3854 return -1;
3855 }
3856 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
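
/*
 * Illustrative resume sketch (an assumption, not code from this file): in
 * the driver's system resume path, before drm_atomic_helper_resume():
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
 *		// Topology vanished while suspended: drop MST mode and
 *		// let a full reprobe/detect cycle rebuild the state.
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *	}
 */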
3857
3858 static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
3859 {
3860 memset(msg, 0, sizeof(*msg));
3861 }
3862
3863 static bool
3864 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3865 struct drm_dp_mst_branch **mstb)
3866 {
3867 int len;
3868 u8 replyblock[32];
3869 int replylen, curreply;
3870 int ret;
3871 u8 hdrlen;
3872 struct drm_dp_sideband_msg_hdr hdr;
3873 struct drm_dp_sideband_msg_rx *msg =
3874 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3875 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3876 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3877
3878 if (!up)
3879 *mstb = NULL;
3880
3881 len = min(mgr->max_dpcd_transaction_bytes, 16);
3882 ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len);
3883 if (ret < 0) {
3884 drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
3885 return false;
3886 }
3887
3888 ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
3889 if (ret == false) {
3890 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3891 1, replyblock, len, false);
3892 drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
3893 return false;
3894 }
3895
3896 if (!up) {
3897 /* Caller is responsible for giving back this reference */
3898 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3899 if (!*mstb) {
3900 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
3901 return false;
3902 }
3903 }
3904
3905 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3906 drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
3907 return false;
3908 }
3909
3910 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3911 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3912 if (!ret) {
3913 drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
3914 return false;
3915 }
3916
3917 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3918 curreply = len;
3919 while (replylen > 0) {
3920 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3921 ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply,
3922 replyblock, len);
3923 if (ret < 0) {
3924 drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
3925 len, ret);
3926 return false;
3927 }
3928
3929 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3930 if (!ret) {
3931 drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
3932 return false;
3933 }
3934
3935 curreply += len;
3936 replylen -= len;
3937 }
3938 return true;
3939 }
3940
3941 static int get_msg_request_type(u8 data)
3942 {
3943 return data & 0x7f;
3944 }
3945
3946 static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
3947 const struct drm_dp_sideband_msg_tx *txmsg,
3948 const struct drm_dp_sideband_msg_rx *rxmsg)
3949 {
3950 const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
3951 const struct drm_dp_mst_branch *mstb = txmsg->dst;
3952 int tx_req_type = get_msg_request_type(txmsg->msg[0]);
3953 int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
3954 char rad_str[64];
3955
3956 if (tx_req_type == rx_req_type)
3957 return true;
3958
3959 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
3960 drm_dbg_kms(mgr->dev,
3961 "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
3962 mstb, hdr->seqno, mstb->lct, rad_str,
3963 drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
3964 drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
3965
3966 return false;
3967 }
3968
3969 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3970 {
3971 struct drm_dp_sideband_msg_tx *txmsg;
3972 struct drm_dp_mst_branch *mstb = NULL;
3973 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3974
3975 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3976 goto out_clear_reply;
3977
3978 /* Multi-packet message transmission, don't clear the reply */
3979 if (!msg->have_eomt)
3980 goto out;
3981
3982 /* find the message */
3983 mutex_lock(&mgr->qlock);
3984
3985 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3986 struct drm_dp_sideband_msg_tx, next);
3987
3988 /* Were we actually expecting a response, and from this mstb? */
3989 if (!txmsg || txmsg->dst != mstb) {
3990 struct drm_dp_sideband_msg_hdr *hdr;
3991
3992 hdr = &msg->initial_hdr;
3993 drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
3994 mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
3995
3996 mutex_unlock(&mgr->qlock);
3997
3998 goto out_clear_reply;
3999 }
4000
4001 if (!verify_rx_request_type(mgr, txmsg, msg)) {
4002 mutex_unlock(&mgr->qlock);
4003
4004 goto out_clear_reply;
4005 }
4006
4007 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
4008
4009 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4010 drm_dbg_kms(mgr->dev,
4011 "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
4012 txmsg->reply.req_type,
4013 drm_dp_mst_req_type_str(txmsg->reply.req_type),
4014 txmsg->reply.u.nak.reason,
4015 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
4016 txmsg->reply.u.nak.nak_data);
4017 }
4018
4019 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
4020 list_del(&txmsg->next);
4021
4022 mutex_unlock(&mgr->qlock);
4023
4024 wake_up_all(&mgr->tx_waitq);
4025
4026 out_clear_reply:
4027 reset_msg_rx_state(msg);
4028 out:
4029 if (mstb)
4030 drm_dp_mst_topology_put_mstb(mstb);
4031
4032 return 0;
4033 }
4034
4035 static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr)
4036 {
4037 bool probing_done = false;
4038
4039 mutex_lock(&mgr->lock);
4040
4041 if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) {
4042 probing_done = mgr->mst_primary->link_address_sent;
4043 drm_dp_mst_topology_put_mstb(mgr->mst_primary);
4044 }
4045
4046 mutex_unlock(&mgr->lock);
4047
4048 return probing_done;
4049 }
4050
4051 static inline bool
4052 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4053 struct drm_dp_pending_up_req *up_req)
4054 {
4055 struct drm_dp_mst_branch *mstb = NULL;
4056 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
4057 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
4058 bool hotplug = false, dowork = false;
4059
4060 if (hdr->broadcast) {
4061 const guid_t *guid = NULL;
4062
4063 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
4064 guid = &msg->u.conn_stat.guid;
4065 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
4066 guid = &msg->u.resource_stat.guid;
4067
4068 if (guid)
4069 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4070 } else {
4071 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4072 }
4073
4074 if (!mstb) {
4075 drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
4076 return false;
4077 }
4078
4079 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
4080 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
4081 if (!primary_mstb_probing_is_done(mgr)) {
4082 drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n");
4083 } else {
4084 dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4085 hotplug = true;
4086 }
4087 }
4088
4089 drm_dp_mst_topology_put_mstb(mstb);
4090
4091 if (dowork)
4092 queue_work(system_long_wq, &mgr->work);
4093 return hotplug;
4094 }
4095
4096 static void drm_dp_mst_up_req_work(struct work_struct *work)
4097 {
4098 struct drm_dp_mst_topology_mgr *mgr =
4099 container_of(work, struct drm_dp_mst_topology_mgr,
4100 up_req_work);
4101 struct drm_dp_pending_up_req *up_req;
4102 bool send_hotplug = false;
4103
4104 mutex_lock(&mgr->probe_lock);
4105 while (true) {
4106 mutex_lock(&mgr->up_req_lock);
4107 up_req = list_first_entry_or_null(&mgr->up_req_list,
4108 struct drm_dp_pending_up_req,
4109 next);
4110 if (up_req)
4111 list_del(&up_req->next);
4112 mutex_unlock(&mgr->up_req_lock);
4113
4114 if (!up_req)
4115 break;
4116
4117 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4118 kfree(up_req);
4119 }
4120 mutex_unlock(&mgr->probe_lock);
4121
4122 if (send_hotplug)
4123 drm_kms_helper_hotplug_event(mgr->dev);
4124 }
4125
4126 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4127 {
4128 struct drm_dp_pending_up_req *up_req;
4129 struct drm_dp_mst_branch *mst_primary;
4130 int ret = 0;
4131
4132 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4133 goto out_clear_reply;
4134
4135 if (!mgr->up_req_recv.have_eomt)
4136 return 0;
4137
4138 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4139 if (!up_req) {
4140 ret = -ENOMEM;
4141 goto out_clear_reply;
4142 }
4143
4144 INIT_LIST_HEAD(&up_req->next);
4145
4146 drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
4147
4148 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4149 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4150 drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
4151 up_req->msg.req_type);
4152 kfree(up_req);
4153 goto out_clear_reply;
4154 }
4155
4156 mutex_lock(&mgr->lock);
4157 mst_primary = mgr->mst_primary;
4158 if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
4159 mutex_unlock(&mgr->lock);
4160 kfree(up_req);
4161 goto out_clear_reply;
4162 }
4163 mutex_unlock(&mgr->lock);
4164
4165 drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
4166 false);
4167
4168 drm_dp_mst_topology_put_mstb(mst_primary);
4169
4170 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4171 const struct drm_dp_connection_status_notify *conn_stat =
4172 &up_req->msg.u.conn_stat;
4173
4174 drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4175 conn_stat->port_number,
4176 conn_stat->legacy_device_plug_status,
4177 conn_stat->displayport_device_plug_status,
4178 conn_stat->message_capability_status,
4179 conn_stat->input_port,
4180 conn_stat->peer_device_type);
4181 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4182 const struct drm_dp_resource_status_notify *res_stat =
4183 &up_req->msg.u.resource_stat;
4184
4185 drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
4186 res_stat->port_number,
4187 res_stat->available_pbn);
4188 }
4189
4190 up_req->hdr = mgr->up_req_recv.initial_hdr;
4191 mutex_lock(&mgr->up_req_lock);
4192 list_add_tail(&up_req->next, &mgr->up_req_list);
4193 mutex_unlock(&mgr->up_req_lock);
4194 queue_work(system_long_wq, &mgr->up_req_work);
4195 out_clear_reply:
4196 reset_msg_rx_state(&mgr->up_req_recv);
4197 return ret;
4198 }
4199
4200 static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
4201 {
4202 mutex_lock(&mgr->lock);
4203 if (mgr->reset_rx_state) {
4204 mgr->reset_rx_state = false;
4205 reset_msg_rx_state(&mgr->down_rep_recv);
4206 reset_msg_rx_state(&mgr->up_req_recv);
4207 }
4208 mutex_unlock(&mgr->lock);
4209 }
4210
4211 /**
4212 * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
4213 * @mgr: manager to notify irq for.
4214 * @esi: 4 bytes from SINK_COUNT_ESI
4215 * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
4216 * @handled: whether the hpd interrupt was consumed or not
4217 *
4218 * This should be called from the driver when it detects an HPD IRQ,
4219 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
4220 * topology manager will process the sideband messages received
4221 * as indicated in the DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the
4222 * corresponding flags in @ack that the driver has to acknowledge to the DP receiver later.
4223 *
4224 * Note that the driver shall also call
4225 * drm_dp_mst_hpd_irq_send_new_request() if 'handled' is set
4226 * after calling this function, to try to kick off a new request in
4227 * the queue if the previous message transaction has completed.
4228 *
4229 * See also:
4230 * drm_dp_mst_hpd_irq_send_new_request()
4231 */
4232 int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
4233 u8 *ack, bool *handled)
4234 {
4235 int ret = 0;
4236 int sc;
4237 *handled = false;
4238 sc = DP_GET_SINK_COUNT(esi[0]);
4239
4240 if (sc != mgr->sink_count) {
4241 mgr->sink_count = sc;
4242 *handled = true;
4243 }
4244
4245 update_msg_rx_state(mgr);
4246
4247 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
4248 ret = drm_dp_mst_handle_down_rep(mgr);
4249 *handled = true;
4250 ack[1] |= DP_DOWN_REP_MSG_RDY;
4251 }
4252
4253 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4254 ret |= drm_dp_mst_handle_up_req(mgr);
4255 *handled = true;
4256 ack[1] |= DP_UP_REQ_MSG_RDY;
4257 }
4258
4259 return ret;
4260 }
4261 EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
4262
4263 /**
4264 * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
4265 * @mgr: manager to notify irq for.
4266 *
4267 * This should be called from the driver when the MST IRQ event is handled
4268 * and acked. Note that a new down request should only be sent when the
4269 * previous message transaction has completed. The source is not supposed to generate
4270 * interleaved message transactions.
4271 */
4272 void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
4273 {
4274 struct drm_dp_sideband_msg_tx *txmsg;
4275 bool kick = true;
4276
4277 mutex_lock(&mgr->qlock);
4278 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4279 struct drm_dp_sideband_msg_tx, next);
4280 /* If the last transaction is not completed yet */
4281 if (!txmsg ||
4282 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
4283 txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
4284 kick = false;
4285 mutex_unlock(&mgr->qlock);
4286
4287 if (kick)
4288 drm_dp_mst_kick_tx(mgr);
4289 }
4290 EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
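
/*
 * Illustrative HPD IRQ sketch (an assumption about a typical handler, not
 * code from this file): service the ESI vector, ack the handled events
 * back to the sink, then kick the next queued request.
 *
 *	u8 esi[4] = {}, ack[4] = {};
 *	bool handled;
 *
 *	// ...read DP_SINK_COUNT_ESI and the following bytes into esi...
 *	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
 *	if (handled) {
 *		drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
 *				       ack[1]);
 *		drm_dp_mst_hpd_irq_send_new_request(mgr);
 *	}
 */
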
4291 /**
4292 * drm_dp_mst_detect_port() - get connection status for an MST port
4293 * @connector: DRM connector for this port
4294 * @ctx: The acquisition context to use for grabbing locks
4295 * @mgr: manager for this port
4296 * @port: pointer to a port
4297 *
4298 * This returns the current connection state for a port.
4299 */
4300 int
4301 drm_dp_mst_detect_port(struct drm_connector *connector,
4302 struct drm_modeset_acquire_ctx *ctx,
4303 struct drm_dp_mst_topology_mgr *mgr,
4304 struct drm_dp_mst_port *port)
4305 {
4306 int ret;
4307
4308 /* we need to search for the port in the mgr in case it's gone */
4309 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4310 if (!port)
4311 return connector_status_disconnected;
4312
4313 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4314 if (ret)
4315 goto out;
4316
4317 ret = connector_status_disconnected;
4318
4319 if (!port->ddps)
4320 goto out;
4321
4322 switch (port->pdt) {
4323 case DP_PEER_DEVICE_NONE:
4324 break;
4325 case DP_PEER_DEVICE_MST_BRANCHING:
4326 if (!port->mcs)
4327 ret = connector_status_connected;
4328 break;
4329
4330 case DP_PEER_DEVICE_SST_SINK:
4331 ret = connector_status_connected;
4332 /* for logical ports - cache the EDID */
4333 if (drm_dp_mst_port_is_logical(port) && !port->cached_edid)
4334 port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
4335 break;
4336 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4337 if (port->ldps)
4338 ret = connector_status_connected;
4339 break;
4340 }
4341 out:
4342 drm_dp_mst_topology_put_port(port);
4343 return ret;
4344 }
4345 EXPORT_SYMBOL(drm_dp_mst_detect_port);
4346
4347 /**
4348 * drm_dp_mst_edid_read() - get EDID for an MST port
4349 * @connector: toplevel connector to get EDID for
4350 * @mgr: manager for this port
4351 * @port: unverified pointer to a port.
4352 *
4353 * This returns an EDID for the port connected to a connector.
4354 * It validates that the pointer still exists so the caller doesn't require a
4355 * reference.
4356 */
4357 const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
4358 struct drm_dp_mst_topology_mgr *mgr,
4359 struct drm_dp_mst_port *port)
4360 {
4361 const struct drm_edid *drm_edid;
4362
4363 /* we need to search for the port in the mgr in case it's gone */
4364 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4365 if (!port)
4366 return NULL;
4367
4368 if (port->cached_edid)
4369 drm_edid = drm_edid_dup(port->cached_edid);
4370 else
4371 drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
4372
4373 drm_dp_mst_topology_put_port(port);
4374
4375 return drm_edid;
4376 }
4377 EXPORT_SYMBOL(drm_dp_mst_edid_read);
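
/*
 * Illustrative get_modes sketch (an assumption, not from this file): an MST
 * connector's ->get_modes() can feed the result straight into the EDID
 * connector helpers.
 *
 *	const struct drm_edid *drm_edid =
 *		drm_dp_mst_edid_read(connector, mgr, port);
 *
 *	drm_edid_connector_update(connector, drm_edid);
 *	num_modes = drm_edid_connector_add_modes(connector);
 *	drm_edid_free(drm_edid);
 */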
4378
4379 /**
4380 * drm_dp_mst_get_edid() - get EDID for an MST port
4381 * @connector: toplevel connector to get EDID for
4382 * @mgr: manager for this port
4383 * @port: unverified pointer to a port.
4384 *
4385 * This function is deprecated; please use drm_dp_mst_edid_read() instead.
4386 *
4387 * This returns an EDID for the port connected to a connector.
4388 * It validates that the pointer still exists so the caller doesn't require a
4389 * reference.
4390 */
4391 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
4392 struct drm_dp_mst_topology_mgr *mgr,
4393 struct drm_dp_mst_port *port)
4394 {
4395 const struct drm_edid *drm_edid;
4396 struct edid *edid;
4397
4398 drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
4399
4400 edid = drm_edid_duplicate(drm_edid_raw(drm_edid));
4401
4402 drm_edid_free(drm_edid);
4403
4404 return edid;
4405 }
4406 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4407
4408 /**
4409 * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
4410 * @state: global atomic state
4411 * @mgr: MST topology manager for the port
4412 * @port: port to find time slots for
4413 * @pbn: bandwidth required for the mode in PBN
4414 *
4415 * Allocates time slots to @port, replacing any previous time slot allocations it may
4416 * have had. Any atomic drivers which support MST must call this function in
4417 * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
4418 * change the current time slot allocation for the new state, and ensure the MST
4419 * atomic state is added whenever the state of payloads in the topology changes.
4420 *
4421 * Allocations set by this function are not checked against the bandwidth
4422 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4423 *
4424 * Additionally, it is OK to call this function multiple times on the same
4425 * @port as needed. It is not OK however, to call this function and
4426 * drm_dp_atomic_release_time_slots() in the same atomic check phase.
4427 *
4428 * See also:
4429 * drm_dp_atomic_release_time_slots()
4430 * drm_dp_mst_atomic_check()
4431 *
4432 * Returns:
4433 * Total slots in the atomic state assigned for this port, or a negative error
4434 * code if the port no longer exists
4435 */
4436 int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
4437 struct drm_dp_mst_topology_mgr *mgr,
4438 struct drm_dp_mst_port *port, int pbn)
4439 {
4440 struct drm_dp_mst_topology_state *topology_state;
4441 struct drm_dp_mst_atomic_payload *payload = NULL;
4442 struct drm_connector_state *conn_state;
4443 int prev_slots = 0, prev_bw = 0, req_slots;
4444
4445 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4446 if (IS_ERR(topology_state))
4447 return PTR_ERR(topology_state);
4448
4449 conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4450 topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
4451
4452 /* Find the current allocation for this port, if any */
4453 payload = drm_atomic_get_mst_payload_state(topology_state, port);
4454 if (payload) {
4455 prev_slots = payload->time_slots;
4456 prev_bw = payload->pbn;
4457
4458 /*
4459 * This should never happen, unless the driver tries
4460 * releasing and allocating the same timeslot allocation,
4461 * which is an error
4462 */
4463 if (drm_WARN_ON(mgr->dev, payload->delete)) {
4464 drm_err(mgr->dev,
4465 "cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
4466 port);
4467 return -EINVAL;
4468 }
4469 }
4470
4471 req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full);
4472
4473 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
4474 port->connector->base.id, port->connector->name,
4475 port, prev_slots, req_slots);
4476 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4477 port->connector->base.id, port->connector->name,
4478 port, prev_bw, pbn);
4479
4480 /* Add the new allocation to the state, note the VCPI isn't assigned until the end */
4481 if (!payload) {
4482 payload = kzalloc(sizeof(*payload), GFP_KERNEL);
4483 if (!payload)
4484 return -ENOMEM;
4485
4486 drm_dp_mst_get_port_malloc(port);
4487 payload->port = port;
4488 payload->vc_start_slot = -1;
4489 payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
4490 list_add(&payload->next, &topology_state->payloads);
4491 }
4492 payload->time_slots = req_slots;
4493 payload->pbn = pbn;
4494
4495 return req_slots;
4496 }
4497 EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
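
/*
 * Illustrative atomic_check sketch (an assumption, not from this file): the
 * encoder's ->atomic_check() computes the stream's PBN and asks for time
 * slots in the same pass; "bpp_x16" is the pixel bpp in .4 fixed point as
 * expected by drm_dp_calc_pbn_mode().
 *
 *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp_x16);
 *	int slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
 *
 *	if (slots < 0)
 *		return slots;
 */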
4498
4499 /**
4500 * drm_dp_atomic_release_time_slots() - Release allocated time slots
4501 * @state: global atomic state
4502 * @mgr: MST topology manager for the port
4503 * @port: The port to release the time slots from
4504 *
4505 * Releases any time slots that have been allocated to a port in the atomic
4506 * state. Any atomic drivers which support MST must call this function
4507 * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
4508 * This helper will check whether time slots would be released by the new state and
4509 * respond accordingly, along with ensuring the MST state is always added to the
4510 * atomic state whenever a new state would modify the state of payloads on the
4511 * topology.
4512 *
4513 * It is OK to call this even if @port has been removed from the system.
4514 * Additionally, it is OK to call this function multiple times on the same
4515 * @port as needed. It is not OK however, to call this function and
4516 * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
4517 * phase.
4518 *
4519 * See also:
4520 * drm_dp_atomic_find_time_slots()
4521 * drm_dp_mst_atomic_check()
4522 *
4523 * Returns:
4524 * 0 on success, negative error code otherwise
4525 */
4526 int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
4527 struct drm_dp_mst_topology_mgr *mgr,
4528 struct drm_dp_mst_port *port)
4529 {
4530 struct drm_dp_mst_topology_state *topology_state;
4531 struct drm_dp_mst_atomic_payload *payload;
4532 struct drm_connector_state *old_conn_state, *new_conn_state;
4533 bool update_payload = true;
4534
4535 old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
4536 if (!old_conn_state->crtc)
4537 return 0;
4538
4539 /* If the CRTC isn't disabled by this state, don't release its payload */
4540 new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
4541 if (new_conn_state->crtc) {
4542 struct drm_crtc_state *crtc_state =
4543 drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4544
4545 /* No modeset means no payload changes, so it's safe to not pull in the MST state */
4546 if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
4547 return 0;
4548
4549 if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
4550 update_payload = false;
4551 }
4552
4553 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4554 if (IS_ERR(topology_state))
4555 return PTR_ERR(topology_state);
4556
4557 topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4558 if (!update_payload)
4559 return 0;
4560
4561 payload = drm_atomic_get_mst_payload_state(topology_state, port);
4562 if (WARN_ON(!payload)) {
4563 drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
4564 port, &topology_state->base);
4565 return -EINVAL;
4566 }
4567
4568 if (new_conn_state->crtc)
4569 return 0;
4570
4571 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
4572 if (!payload->delete) {
4573 payload->pbn = 0;
4574 payload->delete = true;
4575 topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
4576 }
4577
4578 return 0;
4579 }
4580 EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
4581
4582 /**
4583 * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
4584 * @state: global atomic state
4585 *
4586 * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
4587 * currently assigned to an MST topology. Drivers must call this hook from their
4588 * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
4589 *
4590 * Returns:
4591 * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
4592 */
4593 int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
4594 {
4595 struct drm_dp_mst_topology_mgr *mgr;
4596 struct drm_dp_mst_topology_state *mst_state;
4597 struct drm_crtc *crtc;
4598 struct drm_crtc_state *crtc_state;
4599 int i, j, commit_idx, num_commit_deps;
4600
4601 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4602 if (!mst_state->pending_crtc_mask)
4603 continue;
4604
4605 num_commit_deps = hweight32(mst_state->pending_crtc_mask);
4606 mst_state->commit_deps = kmalloc_array(num_commit_deps,
4607 sizeof(*mst_state->commit_deps), GFP_KERNEL);
4608 if (!mst_state->commit_deps)
4609 return -ENOMEM;
4610 mst_state->num_commit_deps = num_commit_deps;
4611
4612 commit_idx = 0;
4613 for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
4614 if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
4615 mst_state->commit_deps[commit_idx++] =
4616 drm_crtc_commit_get(crtc_state->commit);
4617 }
4618 }
4619 }
4620
4621 return 0;
4622 }
4623 EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
4624
4625 /**
4626 * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
4627 * prepare new MST state for commit
4628 * @state: global atomic state
4629 *
4630 * Goes through any MST topologies in this atomic state, and waits for any pending commits which
4631 * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
4632 * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
4633 * with eachother by forcing them to be executed sequentially in situations where the only resources
4634 * the modeset objects in these commits share are an MST topology.
4635 *
4636 * This function also prepares the new MST state for commit by performing some state preparation
4637 * which can't be done until this point, such as reading back the final VC start slots (which are
4638 * determined at commit-time) from the previous state.
4639 *
4640 * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
4641 * or whatever their equivalent of that is.
4642 */
4643 void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
4644 {
4645 struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
4646 struct drm_dp_mst_topology_mgr *mgr;
4647 struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
4648 int i, j, ret;
4649
4650 for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
4651 for (j = 0; j < old_mst_state->num_commit_deps; j++) {
4652 ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
4653 if (ret < 0)
4654 drm_err(state->dev, "Failed to wait for %s: %d\n",
4655 old_mst_state->commit_deps[j]->crtc->name, ret);
4656 }
4657
4658 /* Now that previous state is committed, it's safe to copy over the start slot
4659 * and allocation status assignments
4660 */
4661 list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
4662 if (old_payload->delete)
4663 continue;
4664
4665 new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
4666 old_payload->port);
4667 new_payload->vc_start_slot = old_payload->vc_start_slot;
4668 new_payload->payload_allocation_status =
4669 old_payload->payload_allocation_status;
4670 }
4671 }
4672 }
4673 EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
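
/*
 * Illustrative hook wiring (an assumption, not from this file): the struct
 * and commit_tail below are hypothetical driver code showing where the two
 * MST commit helpers above are expected to be called.
 *
 *	static const struct drm_mode_config_helper_funcs driver_mode_config_helpers = {
 *		.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
 *	};
 *
 *	static void driver_commit_tail(struct drm_atomic_state *state)
 *	{
 *		drm_atomic_helper_wait_for_dependencies(state);
 *		drm_dp_mst_atomic_wait_for_dependencies(state);
 *		// ...program hardware, commit planes, etc...
 *	}
 */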
4674
4675 /**
4676 * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
4677 * in SST mode
4678 * @new_conn_state: The new connector state of the &drm_connector
4679 * @mgr: The MST topology manager for the &drm_connector
4680 *
4681 * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
4682 * serialize non-blocking commits happening on the real DP connector of an MST topology switching
4683 * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
4684 * MST topology will never share the same &drm_encoder.
4685 *
4686 * This function takes care of this serialization issue, by checking a root MST connector's atomic
4687 * state to determine if it is about to have a modeset - and then pulling in the MST topology state
4688 * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
4689 *
4690 * Drivers implementing MST must call this function from the
4691 * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
4692 * driving MST sinks.
4693 *
4694 * Returns:
4695 * 0 on success, negative error code otherwise
4696 */
4697 int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
4698 struct drm_dp_mst_topology_mgr *mgr)
4699 {
4700 struct drm_atomic_state *state = new_conn_state->state;
4701 struct drm_connector_state *old_conn_state =
4702 drm_atomic_get_old_connector_state(state, new_conn_state->connector);
4703 struct drm_crtc_state *crtc_state;
4704 struct drm_dp_mst_topology_state *mst_state = NULL;
4705
4706 if (new_conn_state->crtc) {
4707 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
4708 if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4709 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4710 if (IS_ERR(mst_state))
4711 return PTR_ERR(mst_state);
4712
4713 mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
4714 }
4715 }
4716
4717 if (old_conn_state->crtc) {
4718 crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
4719 if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
4720 if (!mst_state) {
4721 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4722 if (IS_ERR(mst_state))
4723 return PTR_ERR(mst_state);
4724 }
4725
4726 mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
4727 }
4728 }
4729
4730 return 0;
4731 }
4732 EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
4733
4734 /**
4735 * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
4736 * @mst_state: mst_state to update
4737 * @link_encoding_cap: the encoding format on the link
4738 */
4739 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
4740 {
4741 if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
4742 mst_state->total_avail_slots = 64;
4743 mst_state->start_slot = 0;
4744 } else {
4745 mst_state->total_avail_slots = 63;
4746 mst_state->start_slot = 1;
4747 }
4748
4749 DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
4750 (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
4751 mst_state);
4752 }
4753 EXPORT_SYMBOL(drm_dp_mst_update_slots);
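
/*
 * Illustrative usage (an assumption, not from this file): drivers typically
 * call this once per atomic check after pulling in the topology state,
 * based on whether the link runs 128b/132b (UHBR) or 8b/10b channel coding;
 * "link_is_uhbr" is a stand-in for the driver's own knowledge of the link.
 *
 *	drm_dp_mst_update_slots(mst_state, link_is_uhbr ?
 *				DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B);
 */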
4754
4755 /**
4756 * drm_dp_check_act_status() - Polls for ACT handled status.
4757 * @mgr: manager to use
4758 *
4759 * Tries waiting for the MST hub to finish updating its payload table by
4760 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
4761 * take that long).
4762 *
4763 * Returns:
4764 * 0 if the ACT was handled in time, negative error code on failure.
4765 */
4766 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4767 {
4768 /*
4769 * There doesn't seem to be any recommended retry count or timeout in
4770 * the MST specification. Since some hubs have been observed to take
4771 * over 1 second to update their payload allocations under certain
4772 * conditions, we use a rather large timeout value of 3 seconds.
4773 */
4774 return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
4775 }
4776 EXPORT_SYMBOL(drm_dp_check_act_status);
4777
4778 /**
4779 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4780 * @clock: dot clock
4781 * @bpp: bpp as .4 binary fixed point
4782 *
4783 * This uses the formula in the spec to calculate the PBN value for a mode.
4784 */
4785 int drm_dp_calc_pbn_mode(int clock, int bpp)
4786 {
4787 /*
4788 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
4789 * a common multiplier to render an integer PBN for all link rate/lane
4790 * count combinations
4791 * calculate
4792 * peak_kbps = clock * bpp / 16
4793 * peak_kbps *= SSC overhead / 1000000
4794 * peak_kbps /= 8 convert to Kbytes
4795 * peak_kBps *= (64/54) / 1000 convert to PBN
4796 */
4797 /*
4798 * TODO: Use the actual link and mode parameters to calculate
4799 * the overhead. For now it's assumed that these are
4800 * 4 link lanes, 4096 hactive pixels, which don't add any
4801 * significant data padding overhead and that there is no DSC
4802 * or FEC overhead.
4803 */
4804 int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp,
4805 DRM_DP_BW_OVERHEAD_MST |
4806 DRM_DP_BW_OVERHEAD_SSC_REF_CLK);
4807
4808 return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4),
4809 1000000ULL * 8 * 54 * 1000);
4810 }
4811 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
4812
4813 /* we want to kick the TX after we've acked the up/down IRQs. */
4814 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4815 {
4816 queue_work(system_long_wq, &mgr->tx_work);
4817 }
4818
4819 /*
4820 * Helper function for parsing DP device types into convenient strings
4821 * for use with dp_mst_topology
4822 */
4823 static const char *pdt_to_string(u8 pdt)
4824 {
4825 switch (pdt) {
4826 case DP_PEER_DEVICE_NONE:
4827 return "NONE";
4828 case DP_PEER_DEVICE_SOURCE_OR_SST:
4829 return "SOURCE OR SST";
4830 case DP_PEER_DEVICE_MST_BRANCHING:
4831 return "MST BRANCHING";
4832 case DP_PEER_DEVICE_SST_SINK:
4833 return "SST SINK";
4834 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4835 return "DP LEGACY CONV";
4836 default:
4837 return "ERR";
4838 }
4839 }
4840
4841 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4842 struct drm_dp_mst_branch *mstb)
4843 {
4844 struct drm_dp_mst_port *port;
4845 int tabs = mstb->lct;
4846 char prefix[10];
4847 int i;
4848
4849 for (i = 0; i < tabs; i++)
4850 prefix[i] = '\t';
4851 prefix[i] = '\0';
4852
4853 seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
4854 list_for_each_entry(port, &mstb->ports, next) {
4855 seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
4856 prefix,
4857 port->port_num,
4858 port,
4859 port->input ? "input" : "output",
4860 pdt_to_string(port->pdt),
4861 port->ddps,
4862 port->ldps,
4863 port->num_sdp_streams,
4864 port->num_sdp_stream_sinks,
4865 port->fec_capable ? "true" : "false",
4866 port->connector);
4867 if (port->mstb)
4868 drm_dp_mst_dump_mstb(m, port->mstb);
4869 }
4870 }
4871
4872 #define DP_PAYLOAD_TABLE_SIZE 64
4873
4874 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4875 char *buf)
4876 {
4877 int i;
4878
4879 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4880 if (drm_dp_dpcd_read_data(mgr->aux,
4881 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4882 &buf[i], 16) < 0)
4883 return false;
4884 }
4885 return true;
4886 }
4887
4888 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4889 struct drm_dp_mst_port *port, char *name,
4890 int namelen)
4891 {
4892 struct edid *mst_edid;
4893
4894 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4895 drm_edid_get_monitor_name(mst_edid, name, namelen);
4896 kfree(mst_edid);
4897 }
4898
4899 /**
4900 * drm_dp_mst_dump_topology(): dump topology to seq file.
4901 * @m: seq_file to dump output to
4902 * @mgr: manager to dump current topology for.
4903 *
4904 * Helper to dump the MST topology to a seq file for debugfs.
4905 */
4906 void drm_dp_mst_dump_topology(struct seq_file *m,
4907 struct drm_dp_mst_topology_mgr *mgr)
4908 {
4909 struct drm_dp_mst_topology_state *state;
4910 struct drm_dp_mst_atomic_payload *payload;
4911 int i, ret;
4912
4913 static const char *const status[] = {
4914 "None",
4915 "Local",
4916 "DFP",
4917 "Remote",
4918 };
4919
4920 mutex_lock(&mgr->lock);
4921 if (mgr->mst_primary)
4922 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4923
4924 /* dump VCPIs */
4925 mutex_unlock(&mgr->lock);
4926
4927 ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
4928 if (ret < 0)
4929 return;
4930
4931 state = to_drm_dp_mst_topology_state(mgr->base.state);
4932 seq_printf(m, "\n*** Atomic state info ***\n");
4933 seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
4934 state->payload_mask, mgr->max_payloads, state->start_slot,
4935 dfixed_trunc(state->pbn_div));
4936
4937 seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n");
4938 for (i = 0; i < mgr->max_payloads; i++) {
4939 list_for_each_entry(payload, &state->payloads, next) {
4940 char name[14];
4941
4942 if (payload->vcpi != i || payload->delete)
4943 continue;
4944
4945 fetch_monitor_name(mgr, payload->port, name, sizeof(name));
4946 seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n",
4947 i,
4948 payload->port->port_num,
4949 payload->vcpi,
4950 payload->vc_start_slot,
4951 payload->vc_start_slot + payload->time_slots - 1,
4952 payload->pbn,
4953 payload->dsc_enabled ? "Y" : "N",
4954 status[payload->payload_allocation_status],
4955 (*name != 0) ? name : "Unknown");
4956 }
4957 }
4958
4959 seq_printf(m, "\n*** DPCD Info ***\n");
4960 mutex_lock(&mgr->lock);
4961 if (mgr->mst_primary) {
4962 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4963 int ret;
4964
4965 if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
4966 seq_printf(m, "dpcd read failed\n");
4967 goto out;
4968 }
4969 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4970
4971 ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2);
4972 if (ret < 0) {
4973 seq_printf(m, "faux/mst read failed\n");
4974 goto out;
4975 }
4976 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4977
4978 ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1);
4979 if (ret < 0) {
4980 seq_printf(m, "mst ctrl read failed\n");
4981 goto out;
4982 }
4983 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4984
4985 /* dump the standard OUI branch header */
4986 ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf,
4987 DP_BRANCH_OUI_HEADER_SIZE);
4988 if (ret < 0) {
4989 seq_printf(m, "branch oui read failed\n");
4990 goto out;
4991 }
4992 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4993
4994 for (i = 0x3; i < 0x8 && buf[i]; i++)
4995 seq_putc(m, buf[i]);
4996 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4997 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4998 if (dump_dp_payload_table(mgr, buf))
4999 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
5000 }
5001
5002 out:
5003 mutex_unlock(&mgr->lock);
5004 drm_modeset_unlock(&mgr->base.lock);
5005 }
5006 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
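/*
 * A minimal sketch of how a driver's debugfs code might expose this dump.
 * The example_mst_show() name and the assumption that the seq_file private
 * pointer holds the topology manager are illustrative only:
 *
 *	static int example_mst_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(example_mst);
 */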
5007
5008 static void drm_dp_tx_work(struct work_struct *work)
5009 {
5010 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
5011
5012 mutex_lock(&mgr->qlock);
5013 if (!list_empty(&mgr->tx_msg_downq))
5014 process_single_down_tx_qlock(mgr);
5015 mutex_unlock(&mgr->qlock);
5016 }
5017
5018 static inline void
5019 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
5020 {
5021 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
5022
5023 if (port->connector) {
5024 drm_connector_unregister(port->connector);
5025 drm_connector_put(port->connector);
5026 }
5027
5028 drm_dp_mst_put_port_malloc(port);
5029 }
5030
5031 static inline void
5032 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
5033 {
5034 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
5035 struct drm_dp_mst_port *port, *port_tmp;
5036 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
5037 bool wake_tx = false;
5038
5039 mutex_lock(&mgr->lock);
5040 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
5041 list_del(&port->next);
5042 drm_dp_mst_topology_put_port(port);
5043 }
5044 mutex_unlock(&mgr->lock);
5045
5046 /* drop any tx slot msg */
5047 mutex_lock(&mstb->mgr->qlock);
5048 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
5049 if (txmsg->dst != mstb)
5050 continue;
5051
5052 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
5053 list_del(&txmsg->next);
5054 wake_tx = true;
5055 }
5056 mutex_unlock(&mstb->mgr->qlock);
5057
5058 if (wake_tx)
5059 wake_up_all(&mstb->mgr->tx_waitq);
5060
5061 drm_dp_mst_put_mstb_malloc(mstb);
5062 }
5063
5064 static void drm_dp_delayed_destroy_work(struct work_struct *work)
5065 {
5066 struct drm_dp_mst_topology_mgr *mgr =
5067 container_of(work, struct drm_dp_mst_topology_mgr,
5068 delayed_destroy_work);
5069 bool send_hotplug = false, go_again;
5070
5071 /*
5072 * Not a regular list traverse as we have to drop the destroy
5073 * connector lock before destroying the mstb/port, to avoid AB->BA
5074 * ordering between this lock and the config mutex.
5075 */
5076 do {
5077 go_again = false;
5078
5079 for (;;) {
5080 struct drm_dp_mst_branch *mstb;
5081
5082 mutex_lock(&mgr->delayed_destroy_lock);
5083 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
5084 struct drm_dp_mst_branch,
5085 destroy_next);
5086 if (mstb)
5087 list_del(&mstb->destroy_next);
5088 mutex_unlock(&mgr->delayed_destroy_lock);
5089
5090 if (!mstb)
5091 break;
5092
5093 drm_dp_delayed_destroy_mstb(mstb);
5094 go_again = true;
5095 }
5096
5097 for (;;) {
5098 struct drm_dp_mst_port *port;
5099
5100 mutex_lock(&mgr->delayed_destroy_lock);
5101 port = list_first_entry_or_null(&mgr->destroy_port_list,
5102 struct drm_dp_mst_port,
5103 next);
5104 if (port)
5105 list_del(&port->next);
5106 mutex_unlock(&mgr->delayed_destroy_lock);
5107
5108 if (!port)
5109 break;
5110
5111 drm_dp_delayed_destroy_port(port);
5112 send_hotplug = true;
5113 go_again = true;
5114 }
5115 } while (go_again);
5116
5117 if (send_hotplug)
5118 drm_kms_helper_hotplug_event(mgr->dev);
5119 }
5120
5121 static struct drm_private_state *
5122 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
5123 {
5124 struct drm_dp_mst_topology_state *state, *old_state =
5125 to_dp_mst_topology_state(obj->state);
5126 struct drm_dp_mst_atomic_payload *pos, *payload;
5127
5128 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
5129 if (!state)
5130 return NULL;
5131
5132 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
5133
5134 INIT_LIST_HEAD(&state->payloads);
5135 state->commit_deps = NULL;
5136 state->num_commit_deps = 0;
5137 state->pending_crtc_mask = 0;
5138
5139 list_for_each_entry(pos, &old_state->payloads, next) {
5140 /* Prune leftover freed timeslot allocations */
5141 if (pos->delete)
5142 continue;
5143
5144 payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
5145 if (!payload)
5146 goto fail;
5147
5148 drm_dp_mst_get_port_malloc(payload->port);
5149 list_add(&payload->next, &state->payloads);
5150 }
5151
5152 return &state->base;
5153
5154 fail:
5155 list_for_each_entry_safe(pos, payload, &state->payloads, next) {
5156 drm_dp_mst_put_port_malloc(pos->port);
5157 kfree(pos);
5158 }
5159 kfree(state);
5160
5161 return NULL;
5162 }
5163
5164 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
5165 struct drm_private_state *state)
5166 {
5167 struct drm_dp_mst_topology_state *mst_state =
5168 to_dp_mst_topology_state(state);
5169 struct drm_dp_mst_atomic_payload *pos, *tmp;
5170 int i;
5171
5172 list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
5173 /* We only keep references to ports with active payloads */
5174 if (!pos->delete)
5175 drm_dp_mst_put_port_malloc(pos->port);
5176 kfree(pos);
5177 }
5178
5179 for (i = 0; i < mst_state->num_commit_deps; i++)
5180 drm_crtc_commit_put(mst_state->commit_deps[i]);
5181
5182 kfree(mst_state->commit_deps);
5183 kfree(mst_state);
5184 }
5185
5186 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5187 struct drm_dp_mst_branch *branch)
5188 {
5189 while (port->parent) {
5190 if (port->parent == branch)
5191 return true;
5192
5193 if (port->parent->port_parent)
5194 port = port->parent->port_parent;
5195 else
5196 break;
5197 }
5198 return false;
5199 }
5200
5201 static bool
5202 drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr,
5203 struct drm_dp_mst_port *port,
5204 struct drm_dp_mst_port *parent)
5205 {
5206 if (!mgr->mst_primary)
5207 return false;
5208
5209 port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
5210 port);
5211 if (!port)
5212 return false;
5213
5214 if (!parent)
5215 return true;
5216
5217 parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary,
5218 parent);
5219 if (!parent)
5220 return false;
5221
5222 if (!parent->mstb)
5223 return false;
5224
5225 return drm_dp_mst_port_downstream_of_branch(port, parent->mstb);
5226 }
5227
5228 /**
5229 * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent port
5230 * @mgr: MST topology manager
5231 * @port: the port being looked up
5232 * @parent: the parent port
5233 *
5234 * The function returns %true if @port is downstream of @parent. If @parent is
5235 * %NULL - denoting the root port - the function returns %true if @port is in
5236 * @mgr's topology.
5237 */
5238 bool
5239 drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
5240 struct drm_dp_mst_port *port,
5241 struct drm_dp_mst_port *parent)
5242 {
5243 bool ret;
5244
5245 mutex_lock(&mgr->lock);
5246 ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent);
5247 mutex_unlock(&mgr->lock);
5248
5249 return ret;
5250 }
5251 EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent);
5252
5253 static int
5254 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5255 struct drm_dp_mst_topology_state *state,
5256 struct drm_dp_mst_port **failing_port);
5257
5258 static int
5259 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5260 struct drm_dp_mst_topology_state *state,
5261 struct drm_dp_mst_port **failing_port)
5262 {
5263 struct drm_dp_mst_atomic_payload *payload;
5264 struct drm_dp_mst_port *port;
5265 int pbn_used = 0, ret;
5266 bool found = false;
5267
5268 /* Check that we have at least one port in our state that's downstream
5269 * of this branch, otherwise we can skip this branch
5270 */
5271 list_for_each_entry(payload, &state->payloads, next) {
5272 if (!payload->pbn ||
5273 !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
5274 continue;
5275
5276 found = true;
5277 break;
5278 }
5279 if (!found)
5280 return 0;
5281
5282 if (mstb->port_parent)
5283 drm_dbg_atomic(mstb->mgr->dev,
5284 "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5285 mstb->port_parent->parent, mstb->port_parent, mstb);
5286 else
5287 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
5288
5289 list_for_each_entry(port, &mstb->ports, next) {
5290 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port);
5291 if (ret < 0)
5292 return ret;
5293
5294 pbn_used += ret;
5295 }
5296
5297 return pbn_used;
5298 }
5299
5300 static int
5301 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5302 struct drm_dp_mst_topology_state *state,
5303 struct drm_dp_mst_port **failing_port)
5304 {
5305 struct drm_dp_mst_atomic_payload *payload;
5306 int pbn_used = 0;
5307
5308 if (port->pdt == DP_PEER_DEVICE_NONE)
5309 return 0;
5310
5311 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5312 payload = drm_atomic_get_mst_payload_state(state, port);
5313 if (!payload)
5314 return 0;
5315
5316 /*
5317 * This could happen if the sink deasserted its HPD line, but
5318 * the branch device still reports it as attached (PDT != NONE).
5319 */
5320 if (!port->full_pbn) {
5321 drm_dbg_atomic(port->mgr->dev,
5322 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
5323 port->parent, port);
5324 *failing_port = port;
5325 return -EINVAL;
5326 }
5327
5328 pbn_used = payload->pbn;
5329 } else {
5330 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5331 state,
5332 failing_port);
5333 if (pbn_used <= 0)
5334 return pbn_used;
5335 }
5336
5337 if (pbn_used > port->full_pbn) {
5338 drm_dbg_atomic(port->mgr->dev,
5339 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5340 port->parent, port, pbn_used, port->full_pbn);
5341 *failing_port = port;
5342 return -ENOSPC;
5343 }
5344
5345 drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5346 port->parent, port, pbn_used, port->full_pbn);
5347
5348 return pbn_used;
5349 }
5350
5351 static inline int
5352 drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
5353 struct drm_dp_mst_topology_state *mst_state)
5354 {
5355 struct drm_dp_mst_atomic_payload *payload;
5356 int avail_slots = mst_state->total_avail_slots, payload_count = 0;
5357
5358 list_for_each_entry(payload, &mst_state->payloads, next) {
5359 /* Releasing payloads is always OK, even if the port is gone */
5360 if (payload->delete) {
5361 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
5362 payload->port);
5363 continue;
5364 }
5365
5366 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
5367 payload->port, payload->time_slots);
5368
5369 avail_slots -= payload->time_slots;
5370 if (avail_slots < 0) {
5371 drm_dbg_atomic(mgr->dev,
5372 "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
5373 payload->port, mst_state, avail_slots + payload->time_slots);
5374 return -ENOSPC;
5375 }
5376
5377 if (++payload_count > mgr->max_payloads) {
5378 drm_dbg_atomic(mgr->dev,
5379 "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5380 mgr, mst_state, mgr->max_payloads);
5381 return -EINVAL;
5382 }
5383
5384 /* Assign a VCPI */
5385 if (!payload->vcpi) {
5386 payload->vcpi = ffz(mst_state->payload_mask) + 1;
5387 drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
5388 payload->port, payload->vcpi);
5389 mst_state->payload_mask |= BIT(payload->vcpi - 1);
5390 }
5391 }
5392
5393 if (!payload_count)
5394 mst_state->pbn_div.full = dfixed_const(0);
5395
5396 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
5397 mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots,
5398 mst_state->total_avail_slots - avail_slots);
5399
5400 return 0;
5401 }
5402
5403 /**
5404 * drm_dp_mst_add_affected_dsc_crtcs - mark CRTCs affected by a DSC recalculation
5405 * @state: Pointer to the new struct drm_dp_mst_topology_state
5406 * @mgr: MST topology manager
5407 *
5408 * Whenever there is a change in the MST topology, the DSC configuration
5409 * may have to be recalculated, so we need to trigger a modeset on all
5410 * affected CRTCs in that topology.
5412 *
5413 * See also:
5414 * drm_dp_mst_atomic_enable_dsc()
5415 */
5416 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5417 {
5418 struct drm_dp_mst_topology_state *mst_state;
5419 struct drm_dp_mst_atomic_payload *pos;
5420 struct drm_connector *connector;
5421 struct drm_connector_state *conn_state;
5422 struct drm_crtc *crtc;
5423 struct drm_crtc_state *crtc_state;
5424
5425 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5426
5427 if (IS_ERR(mst_state))
5428 return PTR_ERR(mst_state);
5429
5430 list_for_each_entry(pos, &mst_state->payloads, next) {
5431
5432 connector = pos->port->connector;
5433
5434 if (!connector)
5435 return -EINVAL;
5436
5437 conn_state = drm_atomic_get_connector_state(state, connector);
5438
5439 if (IS_ERR(conn_state))
5440 return PTR_ERR(conn_state);
5441
5442 crtc = conn_state->crtc;
5443
5444 if (!crtc)
5445 continue;
5446
5447 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5448 continue;
5449
5450 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5451
5452 if (IS_ERR(crtc_state))
5453 return PTR_ERR(crtc_state);
5454
5455 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5456 mgr, crtc);
5457
5458 crtc_state->mode_changed = true;
5459 }
5460 return 0;
5461 }
5462 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
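/*
 * Hedged usage sketch (illustrative, not taken from any driver): a driver
 * supporting DSC over MST would typically call this early in its
 * &drm_mode_config_funcs.atomic_check implementation, once per topology
 * manager, before computing its own DSC parameters:
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, &example_dp->mst_mgr);
 *	if (ret)
 *		return ret;
 *
 * where example_dp->mst_mgr is a hypothetical pointer to the driver's
 * topology manager instance.
 */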
5463
5464 /**
5465 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5466 * @state: Pointer to the new drm_atomic_state
5467 * @port: Pointer to the affected MST Port
5468 * @pbn: Newly recalculated bw required for link with DSC enabled
5469 * @enable: Boolean flag to enable or disable DSC on the port
5470 *
5471 * This function enables or disables DSC on the given port by recalculating
5472 * its time slot allocation from the provided PBN, and sets the dsc_enabled
5473 * flag to keep track of which ports have DSC enabled. It returns the number
5474 * of time slots allocated for the port, or a negative error code on failure.
5475 *
5476 */
5477 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5478 struct drm_dp_mst_port *port,
5479 int pbn, bool enable)
5480 {
5481 struct drm_dp_mst_topology_state *mst_state;
5482 struct drm_dp_mst_atomic_payload *payload;
5483 int time_slots = 0;
5484
5485 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5486 if (IS_ERR(mst_state))
5487 return PTR_ERR(mst_state);
5488
5489 payload = drm_atomic_get_mst_payload_state(mst_state, port);
5490 if (!payload) {
5491 drm_dbg_atomic(state->dev,
5492 "[MST PORT:%p] Couldn't find payload in mst state %p\n",
5493 port, mst_state);
5494 return -EINVAL;
5495 }
5496
5497 if (payload->dsc_enabled == enable) {
5498 drm_dbg_atomic(state->dev,
5499 "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
5500 port, enable, payload->time_slots);
5501 time_slots = payload->time_slots;
5502 }
5503
5504 if (enable) {
5505 time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
5506 drm_dbg_atomic(state->dev,
5507 "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
5508 port, time_slots);
5509 if (time_slots < 0)
5510 return -EINVAL;
5511 }
5512
5513 payload->dsc_enabled = enable;
5514
5515 return time_slots;
5516 }
5517 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
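/*
 * Illustrative sketch only: a driver deciding to compress a stream during
 * atomic check might recompute the PBN for the compressed bpp and then
 * update the payload (dsc_bpp_x16 is a hypothetical .4 fixed-point value
 * computed by the driver):
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, dsc_bpp_x16);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn, true);
 *	if (slots < 0)
 *		return slots;
 */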
5518
5519 /**
5520 * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager
5521 * @state: The global atomic state
5522 * @mgr: Manager to check
5523 * @mst_state: The MST atomic state for @mgr
5524 * @failing_port: Returns the port with a BW limitation
5525 *
5526 * Checks the given MST manager's topology state for an atomic update to ensure
5527 * that it's valid. This includes checking whether there's enough bandwidth to
5528 * support the new timeslot allocations in the atomic update.
5529 *
5530 * Any atomic driver supporting DP MST must make sure to call this or the
5531 * drm_dp_mst_atomic_check() function after checking the rest of its state
5532 * in its &drm_mode_config_funcs.atomic_check() callback.
5533 *
5534 * See also:
5535 * drm_dp_mst_atomic_check()
5536 * drm_dp_atomic_find_time_slots()
5537 * drm_dp_atomic_release_time_slots()
5538 *
5539 * Returns:
5540 * - 0 if the new state is valid
5541 * - %-ENOSPC, if the new state is invalid, because of BW limitation
5542 * @failing_port is set to:
5543 *
5544 * - The non-root port where a BW limit check failed
5545 * with all the ports downstream of @failing_port passing
5546 * the BW limit check.
5547 * The returned port pointer is valid until at least
5548 * one payload downstream of it exists.
5549 * - %NULL if the BW limit check failed at the root port
5550 * with all the ports downstream of the root port passing
5551 * the BW limit check.
5552 *
5553 * - %-EINVAL, if the new state is invalid, because the root port has
5554 * too many payloads.
5555 */
5556 int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
5557 struct drm_dp_mst_topology_mgr *mgr,
5558 struct drm_dp_mst_topology_state *mst_state,
5559 struct drm_dp_mst_port **failing_port)
5560 {
5561 int ret;
5562
5563 *failing_port = NULL;
5564
5565 if (!mgr->mst_state)
5566 return 0;
5567
5568 mutex_lock(&mgr->lock);
5569 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5570 mst_state,
5571 failing_port);
5572 mutex_unlock(&mgr->lock);
5573
5574 if (ret < 0)
5575 return ret;
5576
5577 return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
5578 }
5579 EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr);
5580
5581 /**
5582 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5583 * atomic update is valid
5584 * @state: Pointer to the new &struct drm_dp_mst_topology_state
5585 *
5586 * Checks the given topology state for an atomic update to ensure that it's
5587 * valid, calling drm_dp_mst_atomic_check_mgr() for every MST manager in the
5588 * atomic state. This includes checking whether there's enough bandwidth to
5589 * support the new timeslot allocations in the atomic update.
5590 *
5591 * Any atomic driver supporting DP MST must make sure to call this after
5592 * checking the rest of its state in its
5593 * &drm_mode_config_funcs.atomic_check() callback.
5594 *
5595 * See also:
5596 * drm_dp_mst_atomic_check_mgr()
5597 * drm_dp_atomic_find_time_slots()
5598 * drm_dp_atomic_release_time_slots()
5599 *
5600 * Returns:
5601 * 0 if the new state is valid, negative error code otherwise.
5602 */
5603 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5604 {
5605 struct drm_dp_mst_topology_mgr *mgr;
5606 struct drm_dp_mst_topology_state *mst_state;
5607 int i, ret = 0;
5608
5609 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5610 struct drm_dp_mst_port *tmp_port;
5611
5612 ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port);
5613 if (ret)
5614 break;
5615 }
5616
5617 return ret;
5618 }
5619 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
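/*
 * Minimal usage sketch (illustrative): an atomic driver would typically call
 * this at the end of its &drm_mode_config_funcs.atomic_check hook, e.g.:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */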
5620
5621 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5622 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5623 .atomic_destroy_state = drm_dp_mst_destroy_state,
5624 };
5625 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5626
5627 /**
5628 * drm_atomic_get_mst_topology_state: get MST topology state
5629 * @state: global atomic state
5630 * @mgr: MST topology manager, also the private object in this case
5631 *
5632 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
5633 * state vtable so that the private object state returned is that of a MST
5634 * topology object.
5635 *
5636 * Returns:
5637 * The MST topology state or error pointer.
5638 */
5639 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5640 struct drm_dp_mst_topology_mgr *mgr)
5641 {
5642 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5643 }
5644 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
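/*
 * Illustrative sketch: during an encoder's atomic check, a driver typically
 * pulls the topology state into the atomic transaction and then allocates
 * time slots for the payload (example_dp->mst_mgr is hypothetical and pbn is
 * computed beforehand by the driver):
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, &example_dp->mst_mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *
 *	slots = drm_dp_atomic_find_time_slots(state, &example_dp->mst_mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 */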
5645
5646 /**
5647 * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
5648 * @state: global atomic state
5649 * @mgr: MST topology manager, also the private object in this case
5650 *
5651 * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
5652 * state vtable so that the private object state returned is that of a MST
5653 * topology object.
5654 *
5655 * Returns:
5656 * The old MST topology state, or NULL if there's no topology state for this MST mgr
5657 * in the global atomic state
5658 */
5659 struct drm_dp_mst_topology_state *
5660 drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
5661 struct drm_dp_mst_topology_mgr *mgr)
5662 {
5663 struct drm_private_state *old_priv_state =
5664 drm_atomic_get_old_private_obj_state(state, &mgr->base);
5665
5666 return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
5667 }
5668 EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
5669
5670 /**
5671 * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
5672 * @state: global atomic state
5673 * @mgr: MST topology manager, also the private object in this case
5674 *
5675 * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
5676 * state vtable so that the private object state returned is that of a MST
5677 * topology object.
5678 *
5679 * Returns:
5680 * The new MST topology state, or NULL if there's no topology state for this MST mgr
5681 * in the global atomic state
5682 */
5683 struct drm_dp_mst_topology_state *
5684 drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
5685 struct drm_dp_mst_topology_mgr *mgr)
5686 {
5687 struct drm_private_state *new_priv_state =
5688 drm_atomic_get_new_private_obj_state(state, &mgr->base);
5689
5690 return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
5691 }
5692 EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
5693
5694 /**
5695 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5696 * @mgr: manager struct to initialise
5697 * @dev: device providing this structure - for i2c addition.
5698 * @aux: DP helper aux channel to talk to this device
5699 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5700 * @max_payloads: maximum number of payloads this GPU can source
5701 * @conn_base_id: the connector object ID the MST device is connected to.
5702 *
5703 * Return 0 for success, or a negative error code on failure.
5704 */
5705 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5706 struct drm_device *dev, struct drm_dp_aux *aux,
5707 int max_dpcd_transaction_bytes, int max_payloads,
5708 int conn_base_id)
5709 {
5710 struct drm_dp_mst_topology_state *mst_state;
5711
5712 mutex_init(&mgr->lock);
5713 mutex_init(&mgr->qlock);
5714 mutex_init(&mgr->delayed_destroy_lock);
5715 mutex_init(&mgr->up_req_lock);
5716 mutex_init(&mgr->probe_lock);
5717 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5718 mutex_init(&mgr->topology_ref_history_lock);
5719 stack_depot_init();
5720 #endif
5721 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5722 INIT_LIST_HEAD(&mgr->destroy_port_list);
5723 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5724 INIT_LIST_HEAD(&mgr->up_req_list);
5725
5726 /*
5727 * delayed_destroy_work will be queued on a dedicated WQ, so that any
5728 * requeuing will also be flushed when deinitializing the topology manager.
5729 */
5730 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5731 if (mgr->delayed_destroy_wq == NULL)
5732 return -ENOMEM;
5733
5734 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5735 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5736 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5737 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5738 init_waitqueue_head(&mgr->tx_waitq);
5739 mgr->dev = dev;
5740 mgr->aux = aux;
5741 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5742 mgr->max_payloads = max_payloads;
5743 mgr->conn_base_id = conn_base_id;
5744
5745 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5746 if (mst_state == NULL)
5747 return -ENOMEM;
5748
5749 mst_state->total_avail_slots = 63;
5750 mst_state->start_slot = 1;
5751
5752 mst_state->mgr = mgr;
5753 INIT_LIST_HEAD(&mst_state->payloads);
5754
5755 drm_atomic_private_obj_init(dev, &mgr->base,
5756 &mst_state->base,
5757 &drm_dp_mst_topology_state_funcs);
5758
5759 return 0;
5760 }
5761 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
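/*
 * Illustrative sketch of a driver initializing its topology manager at
 * connector/encoder setup time; the field names on example_dp and the
 * transaction/payload limits are hypothetical:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&example_dp->mst_mgr, dev,
 *					   &example_dp->aux, 16, 4,
 *					   example_dp->connector.base.id);
 *	if (ret)
 *		return ret;
 *
 * The matching drm_dp_mst_topology_mgr_destroy() call belongs in the
 * driver's teardown path.
 */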
5762
5763 /**
5764 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5765 * @mgr: manager to destroy
5766 */
5767 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5768 {
5769 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5770 flush_work(&mgr->work);
5771 /* The following will also drain any requeued work on the WQ. */
5772 if (mgr->delayed_destroy_wq) {
5773 destroy_workqueue(mgr->delayed_destroy_wq);
5774 mgr->delayed_destroy_wq = NULL;
5775 }
5776 mgr->dev = NULL;
5777 mgr->aux = NULL;
5778 drm_atomic_private_obj_fini(&mgr->base);
5779 mgr->funcs = NULL;
5780
5781 mutex_destroy(&mgr->delayed_destroy_lock);
5782 mutex_destroy(&mgr->qlock);
5783 mutex_destroy(&mgr->lock);
5784 mutex_destroy(&mgr->up_req_lock);
5785 mutex_destroy(&mgr->probe_lock);
5786 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5787 mutex_destroy(&mgr->topology_ref_history_lock);
5788 #endif
5789 }
5790 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5791
5792 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5793 {
5794 int i;
5795
5796 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5797 return false;
5798
5799 for (i = 0; i < num - 1; i++) {
5800 if (msgs[i].flags & I2C_M_RD ||
5801 msgs[i].len > 0xff)
5802 return false;
5803 }
5804
5805 return msgs[num - 1].flags & I2C_M_RD &&
5806 msgs[num - 1].len <= 0xff;
5807 }
5808
5809 static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5810 {
5811 int i;
5812
5813 for (i = 0; i < num - 1; i++) {
5814 if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5815 msgs[i].len > 0xff)
5816 return false;
5817 }
5818
5819 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5820 }
5821
5822 static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5823 struct drm_dp_mst_port *port,
5824 struct i2c_msg *msgs, int num)
5825 {
5826 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5827 unsigned int i;
5828 struct drm_dp_sideband_msg_req_body msg;
5829 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5830 int ret;
5831
5832 memset(&msg, 0, sizeof(msg));
5833 msg.req_type = DP_REMOTE_I2C_READ;
5834 msg.u.i2c_read.num_transactions = num - 1;
5835 msg.u.i2c_read.port_number = port->port_num;
5836 for (i = 0; i < num - 1; i++) {
5837 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5838 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5839 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5840 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5841 }
5842 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5843 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5844
5845 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5846 if (!txmsg) {
5847 ret = -ENOMEM;
5848 goto out;
5849 }
5850
5851 txmsg->dst = mstb;
5852 drm_dp_encode_sideband_req(&msg, txmsg);
5853
5854 drm_dp_queue_down_tx(mgr, txmsg);
5855
5856 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5857 if (ret > 0) {
5858
5859 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5860 ret = -EREMOTEIO;
5861 goto out;
5862 }
5863 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5864 ret = -EIO;
5865 goto out;
5866 }
5867 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5868 ret = num;
5869 }
5870 out:
5871 kfree(txmsg);
5872 return ret;
5873 }
5874
5875 static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5876 struct drm_dp_mst_port *port,
5877 struct i2c_msg *msgs, int num)
5878 {
5879 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5880 unsigned int i;
5881 struct drm_dp_sideband_msg_req_body msg;
5882 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5883 int ret;
5884
5885 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5886 if (!txmsg) {
5887 ret = -ENOMEM;
5888 goto out;
5889 }
5890 for (i = 0; i < num; i++) {
5891 memset(&msg, 0, sizeof(msg));
5892 msg.req_type = DP_REMOTE_I2C_WRITE;
5893 msg.u.i2c_write.port_number = port->port_num;
5894 msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5895 msg.u.i2c_write.num_bytes = msgs[i].len;
5896 msg.u.i2c_write.bytes = msgs[i].buf;
5897
5898 memset(txmsg, 0, sizeof(*txmsg));
5899 txmsg->dst = mstb;
5900
5901 drm_dp_encode_sideband_req(&msg, txmsg);
5902 drm_dp_queue_down_tx(mgr, txmsg);
5903
5904 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5905 if (ret > 0) {
5906 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5907 ret = -EREMOTEIO;
5908 goto out;
5909 }
5910 } else {
5911 goto out;
5912 }
5913 }
5914 ret = num;
5915 out:
5916 kfree(txmsg);
5917 return ret;
5918 }
5919
5920 /* I2C device */
5921 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5922 struct i2c_msg *msgs, int num)
5923 {
5924 struct drm_dp_aux *aux = adapter->algo_data;
5925 struct drm_dp_mst_port *port =
5926 container_of(aux, struct drm_dp_mst_port, aux);
5927 struct drm_dp_mst_branch *mstb;
5928 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5929 int ret;
5930
5931 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5932 if (!mstb)
5933 return -EREMOTEIO;
5934
5935 if (remote_i2c_read_ok(msgs, num)) {
5936 ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5937 } else if (remote_i2c_write_ok(msgs, num)) {
5938 ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5939 } else {
5940 drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
5941 ret = -EIO;
5942 }
5943
5944 drm_dp_mst_topology_put_mstb(mstb);
5945 return ret;
5946 }
5947
5948 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5949 {
5950 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5951 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5952 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5953 I2C_FUNC_10BIT_ADDR;
5954 }
5955
5956 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5957 .functionality = drm_dp_mst_i2c_functionality,
5958 .master_xfer = drm_dp_mst_i2c_xfer,
5959 };
5960
5961 /**
5962 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5963 * @port: The port to add the I2C bus on
5964 *
5965 * Returns 0 on success or a negative error code on failure.
5966 */
5967 static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5968 {
5969 struct drm_dp_aux *aux = &port->aux;
5970 struct device *parent_dev = port->mgr->dev->dev;
5971
5972 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5973 aux->ddc.algo_data = aux;
5974 aux->ddc.retries = 3;
5975
5976 aux->ddc.owner = THIS_MODULE;
5977 /* FIXME: set the kdev of the port's connector as parent */
5978 aux->ddc.dev.parent = parent_dev;
5979 aux->ddc.dev.of_node = parent_dev->of_node;
5980
5981 strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5982 sizeof(aux->ddc.name));
5983
5984 return i2c_add_adapter(&aux->ddc);
5985 }
5986
5987 /**
5988 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5989 * @port: The port to remove the I2C bus from
5990 */
5991 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5992 {
5993 i2c_del_adapter(&port->aux.ddc);
5994 }
5995
5996 /**
5997 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5998 * @port: The port to check
5999 *
6000 * A single physical MST hub object can be represented in the topology
6001 * by multiple branches, with virtual ports between those branches.
6002 *
6003 * As of DP1.4, an MST hub with internal (virtual) ports must expose
6004 * certain DPCD registers over those ports. See sections 2.6.1.1.1
6005 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
6006 *
6007 * May acquire mgr->lock
6008 *
6009 * Returns:
6010 * true if the port is a virtual DP peer device, false otherwise
6011 */
6012 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
6013 {
6014 struct drm_dp_mst_port *downstream_port;
6015
6016 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
6017 return false;
6018
6019 /* Virtual DP Sink (Internal Display Panel) */
6020 if (drm_dp_mst_port_is_logical(port))
6021 return true;
6022
6023 /* DP-to-HDMI Protocol Converter */
6024 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
6025 !port->mcs &&
6026 port->ldps)
6027 return true;
6028
6029 /* DP-to-DP */
6030 mutex_lock(&port->mgr->lock);
6031 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
6032 port->mstb &&
6033 port->mstb->num_ports == 2) {
6034 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
6035 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
6036 !downstream_port->input) {
6037 mutex_unlock(&port->mgr->lock);
6038 return true;
6039 }
6040 }
6041 }
6042 mutex_unlock(&port->mgr->lock);
6043
6044 return false;
6045 }
6046
6047 /**
6048 * drm_dp_mst_aux_for_parent() - Get the AUX device for an MST port's parent
6049 * @port: MST port whose parent's AUX device is returned
6050 *
6051 * Return the AUX device for @port's parent or NULL if port's parent is the
6052 * root port.
6053 */
6054 struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port)
6055 {
6056 if (!port->parent || !port->parent->port_parent)
6057 return NULL;
6058
6059 return &port->parent->port_parent->aux;
6060 }
6061 EXPORT_SYMBOL(drm_dp_mst_aux_for_parent);
6062
6063 /**
6064 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
6065 * @port: The port to check. A leaf of the MST tree with an attached display.
6066 *
6067 * Depending on the situation, DSC may be enabled via the endpoint aux,
6068 * the immediately upstream aux, or the connector's physical aux.
6069 *
6070 * This is both the correct aux to read DSC_CAPABILITY and the
6071 * correct aux to write DSC_ENABLED.
6072 *
6073 * This operation can be expensive (up to four aux reads), so
6074 * the caller should cache the return.
6075 *
6076 * Returns:
6077 * NULL if DSC cannot be enabled on this port, otherwise the aux device
6078 */
6079 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
6080 {
6081 struct drm_dp_mst_port *immediate_upstream_port;
6082 struct drm_dp_aux *immediate_upstream_aux;
6083 struct drm_dp_mst_port *fec_port;
6084 struct drm_dp_desc desc = {};
6085 u8 upstream_dsc;
6086 u8 endpoint_fec;
6087 u8 endpoint_dsc;
6088
6089 if (!port)
6090 return NULL;
6091
6092 if (port->parent->port_parent)
6093 immediate_upstream_port = port->parent->port_parent;
6094 else
6095 immediate_upstream_port = NULL;
6096
6097 fec_port = immediate_upstream_port;
6098 while (fec_port) {
6099 /*
6100 * Each physical link (i.e. not a virtual port) between the
6101 * output and the primary device must support FEC
6102 */
6103 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
6104 !fec_port->fec_capable)
6105 return NULL;
6106
6107 fec_port = fec_port->parent->port_parent;
6108 }
6109
6110 /* DP-to-DP peer device */
6111 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
6112 if (drm_dp_dpcd_read_data(&port->aux,
6113 DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
6114 return NULL;
6115 if (drm_dp_dpcd_read_data(&port->aux,
6116 DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
6117 return NULL;
6118 if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux,
6119 DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
6120 return NULL;
6121
6122 /* Endpoint decompression with DP-to-DP peer device */
6123 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
6124 (endpoint_fec & DP_FEC_CAPABLE) &&
6125 (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
6126 port->passthrough_aux = &immediate_upstream_port->aux;
6127 return &port->aux;
6128 }
6129
6130 /* Virtual DPCD decompression with DP-to-DP peer device */
6131 return &immediate_upstream_port->aux;
6132 }
6133
6134 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
6135 if (drm_dp_mst_is_virtual_dpcd(port))
6136 return &port->aux;
6137
6138 /*
6139 * Synaptics quirk
6140 * Applies to ports for which:
6141 * - Physical aux has Synaptics OUI
6142 * - DPv1.4 or higher
6143 * - Port is on primary branch device
6144 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
6145 */
6146 if (immediate_upstream_port)
6147 immediate_upstream_aux = &immediate_upstream_port->aux;
6148 else
6149 immediate_upstream_aux = port->mgr->aux;
6150
6151 if (drm_dp_read_desc(immediate_upstream_aux, &desc, true))
6152 return NULL;
6153
6154 if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) {
6155 u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
6156
6157 if (drm_dp_dpcd_read_data(immediate_upstream_aux,
6158 DP_DSC_SUPPORT, &upstream_dsc, 1) < 0)
6159 return NULL;
6160
6161 if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
6162 return NULL;
6163
6164 if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0)
6165 return NULL;
6166
6167 if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
6168 ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
6169 ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
6170 != DP_DWN_STRM_PORT_TYPE_ANALOG)))
6171 return immediate_upstream_aux;
6172 }
6173
6174 /*
6175 * The check below verifies if the MST sink
6176 * connected to the GPU is capable of DSC -
6177 * therefore the endpoint needs to be
6178 * both DSC and FEC capable.
6179 */
6180 if (drm_dp_dpcd_read_data(&port->aux,
6181 DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0)
6182 return NULL;
6183 if (drm_dp_dpcd_read_data(&port->aux,
6184 DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0)
6185 return NULL;
6186 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
6187 (endpoint_fec & DP_FEC_CAPABLE))
6188 return &port->aux;
6189
6190 return NULL;
6191 }
6192 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
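/*
 * Illustrative sketch: a driver would typically look the DSC aux up once when
 * probing the sink and cache it (dsc_aux here is a hypothetical field), then
 * use it later to write DP_DSC_ENABLE when enabling the stream:
 *
 *	example_connector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	...
 *	if (example_connector->dsc_aux)
 *		drm_dp_dpcd_writeb(example_connector->dsc_aux, DP_DSC_ENABLE, 1);
 */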
6193