/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/*
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */

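/*
 * A minimal sketch of how a down request is built with these helpers. The
 * snippet is purely illustrative (a real sideband message is normally
 * heap-allocated and then chunked onto the link), but
 * drm_dp_encode_sideband_req() and drm_dp_decode_sideband_req() below are
 * the real, test-only exported entry points:
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_LINK_ADDRESS,
 *	};
 *	struct drm_dp_sideband_msg_tx txmsg = {};
 *
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	// txmsg.msg / txmsg.cur_len now hold the encoded request body
 */
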
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}
#undef DP_STR

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	int number_of_bits = num_nibbles * 4;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	int number_of_bits = number_of_bytes * 8;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

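/*
 * Note on the two CRC helpers above: drm_dp_msg_header_crc4() computes the
 * 4-bit CRC that ends up in the low nibble of the last sideband header byte
 * (fed with all header nibbles except the CRC nibble itself), while
 * drm_dp_msg_data_crc4() computes the 8-bit CRC appended to each sideband
 * message body chunk. See drm_dp_encode_sideband_msg_hdr() and
 * drm_dp_sideband_append_payload() below for the two call sites.
 */
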
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		     (hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

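/*
 * Worked example of the encoding above (values illustrative): a unicast,
 * single-chunk header to the root branch with lct = 1, lcr = 0, msg_len = 6,
 * somt = eomt = 1 and seqno = 0 encodes as three bytes: 0x10 (lct/lcr),
 * 0x06 (broadcast/path_msg/msg_len) and 0xc0 | crc4, with the 4-bit CRC
 * computed over the first five nibbles and stored in the low nibble.
 */
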
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	len += ((buf[0] & 0xf0) >> 4) / 2;

	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;
	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;
	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;

		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);

		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		break;
	}
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

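/*
 * Note that for the request types above that carry a payload
 * (DP_REMOTE_DPCD_WRITE and the DP_REMOTE_I2C_* transactions), the decoded
 * body owns kmemdup()'d copies of the byte buffers, so callers must free
 * them. A sketch of the caller-side cleanup (illustrative):
 *
 *	struct drm_dp_sideband_msg_req_body req;
 *
 *	if (!drm_dp_decode_sideband_req(txmsg, &req) &&
 *	    req.req_type == DP_REMOTE_DPCD_WRITE)
 *		kfree(req.u.dpcd_write.bytes);
 *
 * drm_dp_mst_dump_sideband_msg_tx() below performs exactly this cleanup for
 * every payload-carrying request type.
 */
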
void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
	raw->cur_len = idx;
}

static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					  struct drm_dp_sideband_msg_hdr *hdr,
					  u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

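/*
 * Example: hdr->msg_len counts the trailing CRC byte, so a chunk carrying a
 * two-byte reply body arrives with msg_len == 3; only msg_len - 1 bytes are
 * copied into the assembled message above, and the CRC byte is dropped.
 */
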
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * configurations).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);

	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);

	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int
build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
			      u8 *q_id)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
	req.u.enc_status.stream_id = stream_id;
	memcpy(req.u.enc_status.client_id, q_id,
	       sizeof(req.u.enc_status.client_id));
	req.u.enc_status.stream_event = 0;
	req.u.enc_status.valid_stream_event = false;
	req.u.enc_status.stream_behavior = 0;
	req.u.enc_status.valid_stream_behavior = false;

	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	unsigned long wait_timeout = msecs_to_jiffies(4000);
	unsigned long wait_expires = jiffies + wait_timeout;
	int ret;

	for (;;) {
		/*
		 * If the driver provides a way for this, change to
		 * poll-waiting for the MST reply interrupt if we didn't receive
		 * it for 50 msec. This would cater for cases where the HPD
		 * pulse signal got lost somewhere, even though the sink raised
		 * the corresponding MST interrupt correctly. One example is the
		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
		 * filters out short pulses with a duration less than ~540 usec.
		 *
		 * The poll period is 50 msec to avoid missing an interrupt
		 * after the sink has cleared it (after a 110msec timeout
		 * since it raised the interrupt).
		 */
		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 mgr->cbs->poll_hpd_irq ?
						msecs_to_jiffies(50) :
						wait_timeout);

		if (ret || !mgr->cbs->poll_hpd_irq ||
		    time_after(jiffies, wait_expires))
			break;

		mgr->cbs->poll_hpd_irq(mgr);
	}

	mutex_lock(&mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
			list_del(&txmsg->next);
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
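 *
 * As a minimal driver-side sketch of these rules (the callback context is
 * illustrative; only the two helper calls are real API from this file)::
 *
 *	// in the driver's &drm_dp_mst_topology_cbs.add_connector callback:
 *	drm_dp_mst_get_port_malloc(port);   // keep the port's memory alive
 *
 *	// ...and once the driver's connector is finally destroyed:
 *	drm_dp_mst_put_port_malloc(port);   // may free the port
 */
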
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
	}

	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		DRM_DEBUG("port %p (%d)\n",
			  port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

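/*
 * Typical internal usage pattern for the two helpers above (a sketch): take
 * a topology reference before touching a port that may be unplugged
 * concurrently, and drop it when done:
 *
 *	if (drm_dp_mst_topology_try_get_port(port)) {
 *		// ... safely use port ...
 *		drm_dp_mst_topology_put_port(port);
 *	}
 */
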
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
				port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
				port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
			mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	}

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

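/*
 * Example (illustrative): a branch plugged into port 2 of the root branch
 * (lct 1, empty RAD) gets lct 2 and rad[0] = 0x20; a branch below it on
 * port 3 then gets lct 3 and rad[0] = 0x23, i.e. earlier hops occupy the
 * higher nibbles.
 */
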
2072 static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
2075 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2076 case DP_PEER_DEVICE_SST_SINK:
2078 case DP_PEER_DEVICE_MST_BRANCHING:
2079 		/* For SST branch devices */
2089 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
2092 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2093 struct drm_dp_mst_branch *mstb;
2097 if (port->pdt == new_pdt && port->mcs == new_mcs)
2100 /* Teardown the old pdt, if there is one */
2101 if (port->pdt != DP_PEER_DEVICE_NONE) {
2102 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2104 * If the new PDT would also have an i2c bus,
2105 			 * don't bother with re-registering it
2107 if (new_pdt != DP_PEER_DEVICE_NONE &&
2108 drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2109 port->pdt = new_pdt;
2110 port->mcs = new_mcs;
2114 /* remove i2c over sideband */
2115 drm_dp_mst_unregister_i2c_bus(port);
2117 mutex_lock(&mgr->lock);
2118 drm_dp_mst_topology_put_mstb(port->mstb);
2120 mutex_unlock(&mgr->lock);
2124 port->pdt = new_pdt;
2125 port->mcs = new_mcs;
2127 if (port->pdt != DP_PEER_DEVICE_NONE) {
2128 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2129 /* add i2c over sideband */
2130 ret = drm_dp_mst_register_i2c_bus(port);
2132 lct = drm_dp_calculate_rad(port, rad);
2133 mstb = drm_dp_add_mst_branch_device(lct, rad);
2136 DRM_ERROR("Failed to create MSTB for port %p",
2141 mutex_lock(&mgr->lock);
2143 mstb->mgr = port->mgr;
2144 mstb->port_parent = port;
2147 * Make sure this port's memory allocation stays
2148 * around until its child MSTB releases it
2150 drm_dp_mst_get_port_malloc(port);
2151 mutex_unlock(&mgr->lock);
2153 /* And make sure we send a link address for this */
2160 port->pdt = DP_PEER_DEVICE_NONE;
2165 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2166 * @aux: Fake sideband AUX CH
2167 * @offset: address of the (first) register to read
2168 * @buffer: buffer to store the register values
2169 * @size: number of bytes in @buffer
2171 * Performs the same functionality for remote devices via
2172 * sideband messaging as drm_dp_dpcd_read() does for local
2173 * devices via actual AUX CH.
2175 * Return: Number of bytes read, or negative error code on failure.
2177 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2178 unsigned int offset, void *buffer, size_t size)
2180 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2183 return drm_dp_send_dpcd_read(port->mgr, port,
2184 offset, size, buffer);
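/*
 * Illustrative use (a sketch, not one of the helpers): because
 * &drm_dp_mst_port.aux is marked as remote, a plain drm_dp_dpcd_read() on
 * it is routed through drm_dp_mst_dpcd_read() above, so remote sinks can be
 * accessed with the usual DPCD helpers. The function name is hypothetical.
 */
static ssize_t example_read_remote_dpcd_rev(struct drm_dp_mst_port *port,
					    u8 *rev)
{
	return drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, rev, 1);
}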
2188 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2189 * @aux: Fake sideband AUX CH
2190 * @offset: address of the (first) register to write
2191 * @buffer: buffer containing the values to write
2192 * @size: number of bytes in @buffer
2194 * Performs the same functionality for remote devices via
2195 * sideband messaging as drm_dp_dpcd_write() does for local
2196 * devices via actual AUX CH.
2198 * Return: number of bytes written on success, negative error code on failure.
2200 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2201 unsigned int offset, void *buffer, size_t size)
2203 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2206 return drm_dp_send_dpcd_write(port->mgr, port,
2207 offset, size, buffer);
2210 static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2214 memcpy(mstb->guid, guid, 16);
2216 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2217 if (mstb->port_parent) {
2218 ret = drm_dp_send_dpcd_write(mstb->mgr,
2220 DP_GUID, 16, mstb->guid);
2222 ret = drm_dp_dpcd_write(mstb->mgr->aux,
2223 DP_GUID, mstb->guid, 16);
2227 if (ret < 16 && ret > 0)
2230 return ret == 16 ? 0 : ret;
2233 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2236 size_t proppath_size)
2241 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2242 for (i = 0; i < (mstb->lct - 1); i++) {
2243 int shift = (i % 2) ? 0 : 4;
2244 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2246 snprintf(temp, sizeof(temp), "-%d", port_num);
2247 strlcat(proppath, temp, proppath_size);
2249 snprintf(temp, sizeof(temp), "-%d", pnum);
2250 strlcat(proppath, temp, proppath_size);
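/*
 * Example of the resulting path (illustrative): for a manager whose
 * connector base ID is 34, a sink on port 2 of a branch that itself sits on
 * port 1 of the primary gets the property path "mst:34-1-2".
 */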
2254 * drm_dp_mst_connector_late_register() - Late MST connector registration
2255 * @connector: The MST connector
2256 * @port: The MST port for this connector
2258 * Helper to register the remote aux device for this MST port. Drivers should
2259 * call this from their mst connector's late_register hook to enable MST aux
2262 * Return: 0 on success, negative error code on failure.
2264 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2265 struct drm_dp_mst_port *port)
2267 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2268 port->aux.name, connector->kdev->kobj.name);
2270 port->aux.dev = connector->kdev;
2271 return drm_dp_aux_register_devnode(&port->aux);
2273 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2276 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2277 * @connector: The MST connector
2278 * @port: The MST port for this connector
2280 * Helper to unregister the remote aux device for this MST port, registered by
2281 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2282 * connector's early_unregister hook.
2284 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2285 struct drm_dp_mst_port *port)
2287 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2288 port->aux.name, connector->kdev->kobj.name);
2289 drm_dp_aux_unregister_devnode(&port->aux);
2291 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
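/*
 * Sketch of how a driver typically wires these two helpers into its MST
 * connector functions (the wrapper struct and function names here are
 * hypothetical):
 */
struct example_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_port *port;
};

static int example_mst_late_register(struct drm_connector *connector)
{
	struct example_mst_connector *c =
		container_of(connector, struct example_mst_connector, base);

	return drm_dp_mst_connector_late_register(connector, c->port);
}

static void example_mst_early_unregister(struct drm_connector *connector)
{
	struct example_mst_connector *c =
		container_of(connector, struct example_mst_connector, base);

	drm_dp_mst_connector_early_unregister(connector, c->port);
}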
2294 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2295 struct drm_dp_mst_port *port)
2297 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2301 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2302 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2303 if (!port->connector) {
2308 if (port->pdt != DP_PEER_DEVICE_NONE &&
2309 drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2310 port->port_num >= DP_MST_LOGICAL_PORT_0) {
2311 port->cached_edid = drm_get_edid(port->connector,
2313 drm_connector_set_tile_property(port->connector);
2316 drm_connector_register(port->connector);
2320 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2324 * Drop a topology reference, and unlink the port from the in-memory topology
2328 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2329 struct drm_dp_mst_port *port)
2331 mutex_lock(&mgr->lock);
2332 port->parent->num_ports--;
2333 list_del(&port->next);
2334 mutex_unlock(&mgr->lock);
2335 drm_dp_mst_topology_put_port(port);
2338 static struct drm_dp_mst_port *
2339 drm_dp_mst_add_port(struct drm_device *dev,
2340 struct drm_dp_mst_topology_mgr *mgr,
2341 struct drm_dp_mst_branch *mstb, u8 port_number)
2343 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2348 kref_init(&port->topology_kref);
2349 kref_init(&port->malloc_kref);
2350 port->parent = mstb;
2351 port->port_num = port_number;
2353 port->aux.name = "DPMST";
2354 port->aux.dev = dev->dev;
2355 port->aux.is_remote = true;
2357 	/* initialize the MST downstream port's AUX CRC work queue */
2358 drm_dp_remote_aux_init(&port->aux);
2361 * Make sure the memory allocation for our parent branch stays
2362 * around until our own memory allocation is released
2364 drm_dp_mst_get_mstb_malloc(mstb);
2370 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2371 struct drm_device *dev,
2372 struct drm_dp_link_addr_reply_port *port_msg)
2374 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2375 struct drm_dp_mst_port *port;
2376 int old_ddps = 0, ret;
2377 u8 new_pdt = DP_PEER_DEVICE_NONE;
2379 bool created = false, send_link_addr = false, changed = false;
2381 port = drm_dp_get_port(mstb, port_msg->port_number);
2383 port = drm_dp_mst_add_port(dev, mgr, mstb,
2384 port_msg->port_number);
2389 } else if (!port->input && port_msg->input_port && port->connector) {
2390 /* Since port->connector can't be changed here, we create a
2391 * new port if input_port changes from 0 to 1
2393 drm_dp_mst_topology_unlink_port(mgr, port);
2394 drm_dp_mst_topology_put_port(port);
2395 port = drm_dp_mst_add_port(dev, mgr, mstb,
2396 port_msg->port_number);
2401 } else if (port->input && !port_msg->input_port) {
2403 } else if (port->connector) {
2404 /* We're updating a port that's exposed to userspace, so do it
2407 drm_modeset_lock(&mgr->base.lock, NULL);
2409 old_ddps = port->ddps;
2410 changed = port->ddps != port_msg->ddps ||
2412 (port->ldps != port_msg->legacy_device_plug_status ||
2413 port->dpcd_rev != port_msg->dpcd_revision ||
2414 port->mcs != port_msg->mcs ||
2415 port->pdt != port_msg->peer_device_type ||
2416 port->num_sdp_stream_sinks !=
2417 port_msg->num_sdp_stream_sinks));
2420 port->input = port_msg->input_port;
2422 new_pdt = port_msg->peer_device_type;
2423 new_mcs = port_msg->mcs;
2424 port->ddps = port_msg->ddps;
2425 port->ldps = port_msg->legacy_device_plug_status;
2426 port->dpcd_rev = port_msg->dpcd_revision;
2427 port->num_sdp_streams = port_msg->num_sdp_streams;
2428 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2430 /* manage mstb port lists with mgr lock - take a reference
2433 mutex_lock(&mgr->lock);
2434 drm_dp_mst_topology_get_port(port);
2435 list_add(&port->next, &mstb->ports);
2437 mutex_unlock(&mgr->lock);
2441 	 * Reprobe PBN caps both on hotplug and when re-probing the link
2442 	 * for our parent mstb
2444 if (old_ddps != port->ddps || !created) {
2445 if (port->ddps && !port->input) {
2446 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2455 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2457 send_link_addr = true;
2458 } else if (ret < 0) {
2459 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2465 * If this port wasn't just created, then we're reprobing because
2466 * we're coming out of suspend. In this case, always resend the link
2467 * address if there's an MSTB on this port
2469 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2471 send_link_addr = true;
2473 if (port->connector)
2474 drm_modeset_unlock(&mgr->base.lock);
2475 else if (!port->input)
2476 drm_dp_mst_port_add_connector(mstb, port);
2478 if (send_link_addr && port->mstb) {
2479 ret = drm_dp_send_link_address(mgr, port->mstb);
2480 if (ret == 1) /* MSTB below us changed */
2486 /* put reference to this port */
2487 drm_dp_mst_topology_put_port(port);
2491 drm_dp_mst_topology_unlink_port(mgr, port);
2492 if (port->connector)
2493 drm_modeset_unlock(&mgr->base.lock);
2495 drm_dp_mst_topology_put_port(port);
2500 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2501 struct drm_dp_connection_status_notify *conn_stat)
2503 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2504 struct drm_dp_mst_port *port;
2508 bool dowork = false, create_connector = false;
2510 port = drm_dp_get_port(mstb, conn_stat->port_number);
2514 if (port->connector) {
2515 if (!port->input && conn_stat->input_port) {
2517 * We can't remove a connector from an already exposed
2518 * port, so just throw the port out and make sure we
2519 			 * reprobe the link address of its parent MSTB
2521 drm_dp_mst_topology_unlink_port(mgr, port);
2522 mstb->link_address_sent = false;
2527 /* Locking is only needed if the port's exposed to userspace */
2528 drm_modeset_lock(&mgr->base.lock, NULL);
2529 } else if (port->input && !conn_stat->input_port) {
2530 create_connector = true;
2531 /* Reprobe link address so we get num_sdp_streams */
2532 mstb->link_address_sent = false;
2536 old_ddps = port->ddps;
2537 port->input = conn_stat->input_port;
2538 port->ldps = conn_stat->legacy_device_plug_status;
2539 port->ddps = conn_stat->displayport_device_plug_status;
2541 if (old_ddps != port->ddps) {
2542 if (port->ddps && !port->input)
2543 drm_dp_send_enum_path_resources(mgr, mstb, port);
2548 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2549 new_mcs = conn_stat->message_capability_status;
2550 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2553 } else if (ret < 0) {
2554 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2559 if (port->connector)
2560 drm_modeset_unlock(&mgr->base.lock);
2561 else if (create_connector)
2562 drm_dp_mst_port_add_connector(mstb, port);
2565 drm_dp_mst_topology_put_port(port);
2567 queue_work(system_long_wq, &mstb->mgr->work);
2570 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2573 struct drm_dp_mst_branch *mstb;
2574 struct drm_dp_mst_port *port;
2576 /* find the port by iterating down */
2578 mutex_lock(&mgr->lock);
2579 mstb = mgr->mst_primary;
2584 for (i = 0; i < lct - 1; i++) {
2585 int shift = (i % 2) ? 0 : 4;
2586 int port_num = (rad[i / 2] >> shift) & 0xf;
2588 list_for_each_entry(port, &mstb->ports, next) {
2589 if (port->port_num == port_num) {
2592 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2600 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2604 mutex_unlock(&mgr->lock);
2608 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2609 struct drm_dp_mst_branch *mstb,
2610 const uint8_t *guid)
2612 struct drm_dp_mst_branch *found_mstb;
2613 struct drm_dp_mst_port *port;
2618 if (memcmp(mstb->guid, guid, 16) == 0)
2622 list_for_each_entry(port, &mstb->ports, next) {
2623 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2632 static struct drm_dp_mst_branch *
2633 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2634 const uint8_t *guid)
2636 struct drm_dp_mst_branch *mstb;
2639 /* find the port by iterating down */
2640 mutex_lock(&mgr->lock);
2642 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2644 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2649 mutex_unlock(&mgr->lock);
2653 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2654 struct drm_dp_mst_branch *mstb)
2656 struct drm_dp_mst_port *port;
2658 bool changed = false;
2660 if (!mstb->link_address_sent) {
2661 ret = drm_dp_send_link_address(mgr, mstb);
2668 list_for_each_entry(port, &mstb->ports, next) {
2669 struct drm_dp_mst_branch *mstb_child = NULL;
2671 if (port->input || !port->ddps)
2675 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2679 ret = drm_dp_check_and_send_link_address(mgr,
2681 drm_dp_mst_topology_put_mstb(mstb_child);
2692 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2694 struct drm_dp_mst_topology_mgr *mgr =
2695 container_of(work, struct drm_dp_mst_topology_mgr, work);
2696 struct drm_device *dev = mgr->dev;
2697 struct drm_dp_mst_branch *mstb;
2699 bool clear_payload_id_table;
2701 mutex_lock(&mgr->probe_lock);
2703 mutex_lock(&mgr->lock);
2704 clear_payload_id_table = !mgr->payload_id_table_cleared;
2705 mgr->payload_id_table_cleared = true;
2707 mstb = mgr->mst_primary;
2709 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2713 mutex_unlock(&mgr->lock);
2715 mutex_unlock(&mgr->probe_lock);
2720 * Certain branch devices seem to incorrectly report an available_pbn
2721 * of 0 on downstream sinks, even after clearing the
2722 * DP_PAYLOAD_ALLOCATE_* registers in
2723 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2724 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2725 * things work again.
2727 if (clear_payload_id_table) {
2728 DRM_DEBUG_KMS("Clearing payload ID table\n");
2729 drm_dp_send_clear_payload_id_table(mgr, mstb);
2732 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2733 drm_dp_mst_topology_put_mstb(mstb);
2735 mutex_unlock(&mgr->probe_lock);
2737 drm_kms_helper_hotplug_event(dev);
2740 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2745 if (memchr_inv(guid, 0, 16))
2748 salt = get_jiffies_64();
2750 memcpy(&guid[0], &salt, sizeof(u64));
2751 memcpy(&guid[8], &salt, sizeof(u64));
2756 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2757 u8 port_num, u32 offset, u8 num_bytes)
2759 struct drm_dp_sideband_msg_req_body req;
2761 req.req_type = DP_REMOTE_DPCD_READ;
2762 req.u.dpcd_read.port_number = port_num;
2763 req.u.dpcd_read.dpcd_address = offset;
2764 req.u.dpcd_read.num_bytes = num_bytes;
2765 drm_dp_encode_sideband_req(&req, msg);
2768 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2769 bool up, u8 *msg, int len)
2772 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2773 int tosend, total, offset;
2780 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2782 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2785 if (ret != tosend) {
2786 if (ret == -EIO && retries < 5) {
2790 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2796 } while (total > 0);
2800 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2801 struct drm_dp_sideband_msg_tx *txmsg)
2803 struct drm_dp_mst_branch *mstb = txmsg->dst;
2806 req_type = txmsg->msg[0] & 0x7f;
2807 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2808 req_type == DP_RESOURCE_STATUS_NOTIFY ||
2809 req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
2813 hdr->path_msg = txmsg->path_msg;
2814 if (hdr->broadcast) {
2818 hdr->lct = mstb->lct;
2819 hdr->lcr = mstb->lct - 1;
2822 memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2827 * process a single block of the next message in the sideband queue
2829 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2830 struct drm_dp_sideband_msg_tx *txmsg,
2834 struct drm_dp_sideband_msg_hdr hdr;
2835 int len, space, idx, tosend;
2838 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2841 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2843 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2844 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2846 /* make hdr from dst mst */
2847 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2851 /* amount left to send in this message */
2852 len = txmsg->cur_len - txmsg->cur_offset;
2854 	/* space = 48 (max sideband msg size) - 1 byte for data CRC - header bytes */
2855 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2857 tosend = min(len, space);
2858 if (len == txmsg->cur_len)
2864 hdr.msg_len = tosend + 1;
2865 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2866 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2867 /* add crc at end */
2868 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2871 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2873 if (drm_debug_enabled(DRM_UT_DP)) {
2874 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2876 drm_printf(&p, "sideband msg failed to send\n");
2877 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2882 txmsg->cur_offset += tosend;
2883 if (txmsg->cur_offset == txmsg->cur_len) {
2884 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
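/*
 * Worked example of the chunking math above (illustrative): for a message
 * to an LCT-1 branch the header is 3 bytes (drm_dp_calc_sb_hdr_size()
 * returns 3 + lct / 2), so each 48-byte sideband transaction carries up to
 * 48 - 1 - 3 = 44 message bytes plus the trailing CRC byte.
 */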
2890 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2892 struct drm_dp_sideband_msg_tx *txmsg;
2895 WARN_ON(!mutex_is_locked(&mgr->qlock));
2897 /* construct a chunk from the first msg in the tx_msg queue */
2898 if (list_empty(&mgr->tx_msg_downq))
2901 txmsg = list_first_entry(&mgr->tx_msg_downq,
2902 struct drm_dp_sideband_msg_tx, next);
2903 ret = process_single_tx_qlock(mgr, txmsg, false);
2905 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2906 list_del(&txmsg->next);
2907 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2908 wake_up_all(&mgr->tx_waitq);
2912 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2913 struct drm_dp_sideband_msg_tx *txmsg)
2915 mutex_lock(&mgr->qlock);
2916 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2918 if (drm_debug_enabled(DRM_UT_DP)) {
2919 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2921 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2924 if (list_is_singular(&mgr->tx_msg_downq))
2925 process_single_down_tx_qlock(mgr);
2926 mutex_unlock(&mgr->qlock);
2930 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2932 struct drm_dp_link_addr_reply_port *port_reply;
2935 for (i = 0; i < reply->nports; i++) {
2936 port_reply = &reply->ports[i];
2937 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2939 port_reply->input_port,
2940 port_reply->peer_device_type,
2941 port_reply->port_number,
2942 port_reply->dpcd_revision,
2945 port_reply->legacy_device_plug_status,
2946 port_reply->num_sdp_streams,
2947 port_reply->num_sdp_stream_sinks);
2951 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2952 struct drm_dp_mst_branch *mstb)
2954 struct drm_dp_sideband_msg_tx *txmsg;
2955 struct drm_dp_link_address_ack_reply *reply;
2956 struct drm_dp_mst_port *port, *tmp;
2957 int i, ret, port_mask = 0;
2958 bool changed = false;
2960 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2965 build_link_address(txmsg);
2967 mstb->link_address_sent = true;
2968 drm_dp_queue_down_tx(mgr, txmsg);
2970 /* FIXME: Actually do some real error handling here */
2971 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2973 DRM_ERROR("Sending link address failed with %d\n", ret);
2976 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2977 DRM_ERROR("link address NAK received\n");
2982 reply = &txmsg->reply.u.link_addr;
2983 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2984 drm_dp_dump_link_address(reply);
2986 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2990 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2991 DRM_ERROR("GUID check on %s failed: %d\n",
2996 for (i = 0; i < reply->nports; i++) {
2997 port_mask |= BIT(reply->ports[i].port_number);
2998 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
3006 /* Prune any ports that are currently a part of mstb in our in-memory
3007 * topology, but were not seen in this link address. Usually this
3008 * means that they were removed while the topology was out of sync,
3009 * e.g. during suspend/resume
3011 mutex_lock(&mgr->lock);
3012 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
3013 if (port_mask & BIT(port->port_num))
3016 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
3018 list_del(&port->next);
3019 drm_dp_mst_topology_put_port(port);
3022 mutex_unlock(&mgr->lock);
3026 mstb->link_address_sent = false;
3028 return ret < 0 ? ret : changed;
3032 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3033 struct drm_dp_mst_branch *mstb)
3035 struct drm_dp_sideband_msg_tx *txmsg;
3038 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3043 build_clear_payload_id_table(txmsg);
3045 drm_dp_queue_down_tx(mgr, txmsg);
3047 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3048 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3049 DRM_DEBUG_KMS("clear payload table id nak received\n");
3055 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3056 struct drm_dp_mst_branch *mstb,
3057 struct drm_dp_mst_port *port)
3059 struct drm_dp_enum_path_resources_ack_reply *path_res;
3060 struct drm_dp_sideband_msg_tx *txmsg;
3063 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3068 build_enum_path_resources(txmsg, port->port_num);
3070 drm_dp_queue_down_tx(mgr, txmsg);
3072 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3075 path_res = &txmsg->reply.u.path_resources;
3077 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3078 DRM_DEBUG_KMS("enum path resources nak received\n");
3080 if (port->port_num != path_res->port_number)
3081 DRM_ERROR("got incorrect port in response\n");
3083 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
3084 path_res->port_number,
3085 path_res->full_payload_bw_number,
3086 path_res->avail_payload_bw_number);
3089 * If something changed, make sure we send a
3092 if (port->full_pbn != path_res->full_payload_bw_number ||
3093 port->fec_capable != path_res->fec_capable)
3096 port->full_pbn = path_res->full_payload_bw_number;
3097 port->fec_capable = path_res->fec_capable;
3105 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3107 if (!mstb->port_parent)
3110 if (mstb->port_parent->mstb != mstb)
3111 return mstb->port_parent;
3113 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3117 * Searches upwards in the topology starting from mstb to try to find the
3118 * closest available parent of mstb that's still connected to the rest of the
3119 * topology. This can be used in order to perform operations like releasing
3120 * payloads, where the branch device which owned the payload may no longer be
3121 * around and thus would require that the payload on the last living relative
3124 static struct drm_dp_mst_branch *
3125 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3126 struct drm_dp_mst_branch *mstb,
3129 struct drm_dp_mst_branch *rmstb = NULL;
3130 struct drm_dp_mst_port *found_port;
3132 mutex_lock(&mgr->lock);
3133 if (!mgr->mst_primary)
3137 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3141 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3142 rmstb = found_port->parent;
3143 *port_num = found_port->port_num;
3145 /* Search again, starting from this parent */
3146 mstb = found_port->parent;
3150 mutex_unlock(&mgr->lock);
3154 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3155 struct drm_dp_mst_port *port,
3159 struct drm_dp_sideband_msg_tx *txmsg;
3160 struct drm_dp_mst_branch *mstb;
3162 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3165 port_num = port->port_num;
3166 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3168 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3176 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3182 for (i = 0; i < port->num_sdp_streams; i++)
3186 build_allocate_payload(txmsg, port_num,
3188 pbn, port->num_sdp_streams, sinks);
3190 drm_dp_queue_down_tx(mgr, txmsg);
3193 * FIXME: there is a small chance that between getting the last
3194 * connected mstb and sending the payload message, the last connected
3195 * mstb could also be removed from the topology. In the future, this
3196 * needs to be fixed by restarting the
3197 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3198 * timeout if the topology is still connected to the system.
3200 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3202 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3209 drm_dp_mst_topology_put_mstb(mstb);
3213 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3214 struct drm_dp_mst_port *port, bool power_up)
3216 struct drm_dp_sideband_msg_tx *txmsg;
3219 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3223 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3225 drm_dp_mst_topology_put_port(port);
3229 txmsg->dst = port->parent;
3230 build_power_updown_phy(txmsg, port->port_num, power_up);
3231 drm_dp_queue_down_tx(mgr, txmsg);
3233 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3235 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3241 drm_dp_mst_topology_put_port(port);
3245 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3247 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
3248 struct drm_dp_mst_port *port,
3249 struct drm_dp_query_stream_enc_status_ack_reply *status)
3251 struct drm_dp_sideband_msg_tx *txmsg;
3255 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3259 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3265 get_random_bytes(nonce, sizeof(nonce));
3268 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3269 * transaction at the MST Branch device directly connected to the
3272 txmsg->dst = mgr->mst_primary;
3274 len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
3276 drm_dp_queue_down_tx(mgr, txmsg);
3278 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3281 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3282 drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3288 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3291 drm_dp_mst_topology_put_port(port);
3296 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
3298 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3300 struct drm_dp_payload *payload)
3304 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3306 payload->payload_state = 0;
3309 payload->payload_state = DP_PAYLOAD_LOCAL;
3313 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3314 struct drm_dp_mst_port *port,
3316 struct drm_dp_payload *payload)
3320 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3323 payload->payload_state = DP_PAYLOAD_REMOTE;
3327 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3328 struct drm_dp_mst_port *port,
3330 struct drm_dp_payload *payload)
3332 DRM_DEBUG_KMS("\n");
3333 /* it's okay for these to fail */
3335 drm_dp_payload_send_msg(mgr, port, id, 0);
3338 drm_dp_dpcd_write_payload(mgr, id, payload);
3339 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3343 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3345 struct drm_dp_payload *payload)
3347 payload->payload_state = 0;
3352 * drm_dp_update_payload_part1() - Execute payload update part 1
3353 * @mgr: manager to use.
3355 * This iterates over all proposed virtual channels, and tries to
3356 * allocate space in the link for them. For 0->slots transitions,
3357 * this step just writes the VCPI to the MST device. For slots->0
3358 * transitions, this writes the updated VCPIs and removes the
3359 * remote VC payloads.
3361 * After calling this, the driver should generate ACT and payload
3364 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3366 struct drm_dp_payload req_payload;
3367 struct drm_dp_mst_port *port;
3372 mutex_lock(&mgr->payload_lock);
3373 for (i = 0; i < mgr->max_payloads; i++) {
3374 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3375 struct drm_dp_payload *payload = &mgr->payloads[i];
3376 bool put_port = false;
3378 /* solve the current payloads - compare to the hw ones
3379 - update the hw view */
3380 req_payload.start_slot = cur_slots;
3382 port = container_of(vcpi, struct drm_dp_mst_port,
3385 mutex_lock(&mgr->lock);
3386 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3387 mutex_unlock(&mgr->lock);
3390 drm_dbg_kms(mgr->dev,
3391 "Virtual channel %d is not in current topology\n",
3395 /* Validated ports don't matter if we're releasing
3398 if (vcpi->num_slots) {
3399 port = drm_dp_mst_topology_get_port_validated(
3402 if (vcpi->num_slots == payload->num_slots) {
3403 cur_slots += vcpi->num_slots;
3404 payload->start_slot = req_payload.start_slot;
3407 drm_dbg_kms(mgr->dev,
3408 "Fail:set payload to invalid sink");
3409 mutex_unlock(&mgr->payload_lock);
3416 req_payload.num_slots = vcpi->num_slots;
3417 req_payload.vcpi = vcpi->vcpi;
3420 req_payload.num_slots = 0;
3423 payload->start_slot = req_payload.start_slot;
3424 /* work out what is required to happen with this payload */
3425 if (payload->num_slots != req_payload.num_slots) {
3427 /* need to push an update for this payload */
3428 if (req_payload.num_slots) {
3429 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3431 payload->num_slots = req_payload.num_slots;
3432 payload->vcpi = req_payload.vcpi;
3434 } else if (payload->num_slots) {
3435 payload->num_slots = 0;
3436 drm_dp_destroy_payload_step1(mgr, port,
3439 req_payload.payload_state =
3440 payload->payload_state;
3441 payload->start_slot = 0;
3443 payload->payload_state = req_payload.payload_state;
3445 cur_slots += req_payload.num_slots;
3448 drm_dp_mst_topology_put_port(port);
3451 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3452 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3457 DRM_DEBUG_KMS("removing payload %d\n", i);
3458 for (j = i; j < mgr->max_payloads - 1; j++) {
3459 mgr->payloads[j] = mgr->payloads[j + 1];
3460 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3462 if (mgr->proposed_vcpis[j] &&
3463 mgr->proposed_vcpis[j]->num_slots) {
3464 set_bit(j + 1, &mgr->payload_mask);
3466 clear_bit(j + 1, &mgr->payload_mask);
3470 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3471 sizeof(struct drm_dp_payload));
3472 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3473 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3475 mutex_unlock(&mgr->payload_lock);
3479 EXPORT_SYMBOL(drm_dp_update_payload_part1);
3482 * drm_dp_update_payload_part2() - Execute payload update part 2
3483 * @mgr: manager to use.
3485 * This iterates over all proposed virtual channels, and tries to
3486 * allocate space in the link for them. For 0->slots transitions,
3487 * this step writes the remote VC payload commands. For slots->0
3488 * this just resets some internal state.
3490 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3492 struct drm_dp_mst_port *port;
3497 mutex_lock(&mgr->payload_lock);
3498 for (i = 0; i < mgr->max_payloads; i++) {
3500 if (!mgr->proposed_vcpis[i])
3503 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3505 mutex_lock(&mgr->lock);
3506 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3507 mutex_unlock(&mgr->lock);
3512 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3513 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3514 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3515 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3516 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3519 mutex_unlock(&mgr->payload_lock);
3523 mutex_unlock(&mgr->payload_lock);
3526 EXPORT_SYMBOL(drm_dp_update_payload_part2);
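/*
 * Sketch of the full commit-time sequence a driver runs (illustrative,
 * locking and error handling trimmed): part 1 programs the VC payload
 * table, the ACT handshake is then polled with drm_dp_check_act_status(),
 * and part 2 finishes the remote payload allocation.
 */
static int example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	ret = drm_dp_update_payload_part1(mgr);
	if (ret < 0)
		return ret;

	ret = drm_dp_check_act_status(mgr);
	if (ret < 0)
		return ret;

	return drm_dp_update_payload_part2(mgr);
}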
3528 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3529 struct drm_dp_mst_port *port,
3530 int offset, int size, u8 *bytes)
3533 struct drm_dp_sideband_msg_tx *txmsg;
3534 struct drm_dp_mst_branch *mstb;
3536 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3540 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3546 build_dpcd_read(txmsg, port->port_num, offset, size);
3547 txmsg->dst = port->parent;
3549 drm_dp_queue_down_tx(mgr, txmsg);
3551 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3555 /* DPCD read should never be NACKed */
3556 if (txmsg->reply.reply_type == 1) {
3557 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3558 mstb, port->port_num, offset, size);
3563 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3568 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3570 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3575 drm_dp_mst_topology_put_mstb(mstb);
3580 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3581 struct drm_dp_mst_port *port,
3582 int offset, int size, u8 *bytes)
3585 struct drm_dp_sideband_msg_tx *txmsg;
3586 struct drm_dp_mst_branch *mstb;
3588 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3592 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3598 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3601 drm_dp_queue_down_tx(mgr, txmsg);
3603 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3605 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3613 drm_dp_mst_topology_put_mstb(mstb);
3617 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3619 struct drm_dp_sideband_msg_reply_body reply;
3621 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3622 reply.req_type = req_type;
3623 drm_dp_encode_sideband_reply(&reply, msg);
3627 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3628 struct drm_dp_mst_branch *mstb,
3629 int req_type, bool broadcast)
3631 struct drm_dp_sideband_msg_tx *txmsg;
3633 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3638 drm_dp_encode_up_ack_reply(txmsg, req_type);
3640 mutex_lock(&mgr->qlock);
3641 /* construct a chunk from the first msg in the tx_msg queue */
3642 process_single_tx_qlock(mgr, txmsg, true);
3643 mutex_unlock(&mgr->qlock);
3650 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3651 * @link_rate: link rate in 10kbits/s units
3652 * @link_lane_count: lane count
3654 * Calculate the total bandwidth of a MultiStream Transport link. The returned
3655 * value is in units of PBN per time slot per MTP. This value can be used to
3656 * convert the number of PBNs required for a given stream to the number of
3657 * timeslots this stream requires in each MTP.
3659 int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
3661 if (link_rate == 0 || link_lane_count == 0)
3662 DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
3663 link_rate, link_lane_count);
3665 /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
3666 return link_rate * link_lane_count / 54000;
3668 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
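/*
 * Worked example (illustrative): an HBR2 link (5.4 Gbps per lane, i.e.
 * 540000 in the 10 kbit/s units used here) with 4 lanes yields
 * 540000 * 4 / 54000 = 40, so a stream needing 532 PBN (roughly a 1080p60
 * 24bpp mode) would occupy DIV_ROUND_UP(532, 40) = 14 time slots per MTP.
 */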
3671 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
3672 * @aux: The DP AUX channel to use
3673 * @dpcd: A cached copy of the DPCD capabilities for this sink
3675 * Returns: %true if the sink supports MST, %false otherwise
3677 bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3678 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3682 if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
3685 if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3688 return mstm_cap & DP_MST_CAP;
3690 EXPORT_SYMBOL(drm_dp_read_mst_cap);
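/*
 * Typical detect-path usage (a sketch; the function name is hypothetical
 * and return value checking is omitted for brevity):
 */
static void example_enable_mst_if_capable(struct drm_dp_mst_topology_mgr *mgr,
					  struct drm_dp_aux *aux,
					  const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	if (drm_dp_read_mst_cap(aux, dpcd))
		drm_dp_mst_topology_mgr_set_mst(mgr, true);
}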
3693 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3694 * @mgr: manager to set state for
3695 * @mst_state: true to enable MST on this connector - false to disable.
3697 * This is called by the driver when it detects an MST capable device plugged
3698 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3700 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3703 struct drm_dp_mst_branch *mstb = NULL;
3705 mutex_lock(&mgr->payload_lock);
3706 mutex_lock(&mgr->lock);
3707 if (mst_state == mgr->mst_state)
3710 mgr->mst_state = mst_state;
3711 /* set the device into MST mode */
3713 struct drm_dp_payload reset_pay;
3715 WARN_ON(mgr->mst_primary);
3718 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3719 if (ret != DP_RECEIVER_CAP_SIZE) {
3720 DRM_DEBUG_KMS("failed to read DPCD\n");
3724 mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
3725 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3726 if (mgr->pbn_div == 0) {
3731 /* add initial branch device at LCT 1 */
3732 mstb = drm_dp_add_mst_branch_device(1, NULL);
3739 /* give this the main reference */
3740 mgr->mst_primary = mstb;
3741 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3743 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3746 DP_UPSTREAM_IS_SRC);
3750 reset_pay.start_slot = 0;
3751 reset_pay.num_slots = 0x3f;
3752 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3754 queue_work(system_long_wq, &mgr->work);
3758 /* disable MST on the device */
3759 mstb = mgr->mst_primary;
3760 mgr->mst_primary = NULL;
3761 /* this can fail if the device is gone */
3762 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3764 memset(mgr->payloads, 0,
3765 mgr->max_payloads * sizeof(mgr->payloads[0]));
3766 memset(mgr->proposed_vcpis, 0,
3767 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3768 mgr->payload_mask = 0;
3769 set_bit(0, &mgr->payload_mask);
3771 mgr->payload_id_table_cleared = false;
3773 memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
3774 memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
3778 mutex_unlock(&mgr->lock);
3779 mutex_unlock(&mgr->payload_lock);
3781 drm_dp_mst_topology_put_mstb(mstb);
3785 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3788 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3790 struct drm_dp_mst_port *port;
3792 /* The link address will need to be re-sent on resume */
3793 mstb->link_address_sent = false;
3795 list_for_each_entry(port, &mstb->ports, next)
3797 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3801 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3802 * @mgr: manager to suspend
3804 * This function tells the MST device that we can't handle UP messages
3805 * anymore. This should stop it from sending any since we are suspended.
3807 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3809 mutex_lock(&mgr->lock);
3810 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3811 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3812 mutex_unlock(&mgr->lock);
3813 flush_work(&mgr->up_req_work);
3814 flush_work(&mgr->work);
3815 flush_work(&mgr->delayed_destroy_work);
3817 mutex_lock(&mgr->lock);
3818 if (mgr->mst_state && mgr->mst_primary)
3819 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3820 mutex_unlock(&mgr->lock);
3822 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3825 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3826 * @mgr: manager to resume
3827 * @sync: whether or not to perform topology reprobing synchronously
3829 * This will fetch the DPCD and see if the device is still there;
3830 * if it is, it will rewrite the MSTM control bits and return.
3832 * If the device is gone, this returns -1 and the driver should do
3833 * a full MST reprobe, in case we were undocked.
3835 * During system resume (where it is assumed that the driver will be calling
3836 * drm_atomic_helper_resume()) this function should be called beforehand with
3837 * @sync set to true. In contexts like runtime resume where the driver is not
3838 * expected to be calling drm_atomic_helper_resume(), this function should be
3839 * called with @sync set to false in order to avoid deadlocking.
3841 * Returns: -1 if the MST topology was removed while we were suspended, 0
3844 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3850 mutex_lock(&mgr->lock);
3851 if (!mgr->mst_primary)
3854 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3855 DP_RECEIVER_CAP_SIZE);
3856 if (ret != DP_RECEIVER_CAP_SIZE) {
3857 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3861 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3864 DP_UPSTREAM_IS_SRC);
3866 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3870 	/* Some hubs forget their GUIDs after they resume */
3871 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3873 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3877 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3879 DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3884 * For the final step of resuming the topology, we need to bring the
3885 * state of our in-memory topology back into sync with reality. So,
3886 * restart the probing process as if we're probing a new hub
3888 queue_work(system_long_wq, &mgr->work);
3889 mutex_unlock(&mgr->lock);
3892 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3893 flush_work(&mgr->work);
3899 mutex_unlock(&mgr->lock);
3902 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
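/*
 * Sketch of a driver's system-resume path (illustrative, hypothetical
 * function name): resume synchronously so the topology is re-probed before
 * drm_atomic_helper_resume(), and treat a vanished topology as an unplug.
 */
static void example_mst_system_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0)
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
}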
3905 drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3906 struct drm_dp_mst_branch **mstb)
3910 int replylen, curreply;
3913 struct drm_dp_sideband_msg_hdr hdr;
3914 struct drm_dp_sideband_msg_rx *msg =
3915 up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3916 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3917 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3922 len = min(mgr->max_dpcd_transaction_bytes, 16);
3923 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3925 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3929 ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
3931 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3932 1, replyblock, len, false);
3933 DRM_DEBUG_KMS("ERROR: failed header\n");
3938 /* Caller is responsible for giving back this reference */
3939 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3941 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3947 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3948 DRM_DEBUG_KMS("sideband msg set header failed %d\n",
3953 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3954 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3956 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3960 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3962 while (replylen > 0) {
3963 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3964 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3967 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3972 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3974 DRM_DEBUG_KMS("failed to build sideband msg\n");
3984 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3986 struct drm_dp_sideband_msg_tx *txmsg;
3987 struct drm_dp_mst_branch *mstb = NULL;
3988 struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
3990 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3991 goto out_clear_reply;
3993 /* Multi-packet message transmission, don't clear the reply */
3994 if (!msg->have_eomt)
3997 /* find the message */
3998 mutex_lock(&mgr->qlock);
3999 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4000 struct drm_dp_sideband_msg_tx, next);
4001 mutex_unlock(&mgr->qlock);
4003 /* Were we actually expecting a response, and from this mstb? */
4004 if (!txmsg || txmsg->dst != mstb) {
4005 struct drm_dp_sideband_msg_hdr *hdr;
4007 hdr = &msg->initial_hdr;
4008 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
4009 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
4011 goto out_clear_reply;
4014 drm_dp_sideband_parse_reply(msg, &txmsg->reply);
4016 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4017 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
4018 txmsg->reply.req_type,
4019 drm_dp_mst_req_type_str(txmsg->reply.req_type),
4020 txmsg->reply.u.nak.reason,
4021 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
4022 txmsg->reply.u.nak.nak_data);
4025 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4026 drm_dp_mst_topology_put_mstb(mstb);
4028 mutex_lock(&mgr->qlock);
4029 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
4030 list_del(&txmsg->next);
4031 mutex_unlock(&mgr->qlock);
4033 wake_up_all(&mgr->tx_waitq);
4038 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4041 drm_dp_mst_topology_put_mstb(mstb);
4047 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4048 struct drm_dp_pending_up_req *up_req)
4050 struct drm_dp_mst_branch *mstb = NULL;
4051 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
4052 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
4053 bool hotplug = false;
4055 if (hdr->broadcast) {
4056 const u8 *guid = NULL;
4058 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
4059 guid = msg->u.conn_stat.guid;
4060 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
4061 guid = msg->u.resource_stat.guid;
4064 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4066 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4070 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
4075 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
4076 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
4077 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4081 drm_dp_mst_topology_put_mstb(mstb);
4085 static void drm_dp_mst_up_req_work(struct work_struct *work)
4087 struct drm_dp_mst_topology_mgr *mgr =
4088 container_of(work, struct drm_dp_mst_topology_mgr,
4090 struct drm_dp_pending_up_req *up_req;
4091 bool send_hotplug = false;
4093 mutex_lock(&mgr->probe_lock);
4095 mutex_lock(&mgr->up_req_lock);
4096 up_req = list_first_entry_or_null(&mgr->up_req_list,
4097 struct drm_dp_pending_up_req,
4100 list_del(&up_req->next);
4101 mutex_unlock(&mgr->up_req_lock);
4106 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4109 mutex_unlock(&mgr->probe_lock);
4112 drm_kms_helper_hotplug_event(mgr->dev);
4115 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
4117 struct drm_dp_pending_up_req *up_req;
4119 if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4122 if (!mgr->up_req_recv.have_eomt)
4125 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4127 DRM_ERROR("Not enough memory to process MST up req\n");
4130 INIT_LIST_HEAD(&up_req->next);
4132 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
4134 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4135 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4136 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
4137 up_req->msg.req_type);
4142 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4145 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4146 const struct drm_dp_connection_status_notify *conn_stat =
4147 &up_req->msg.u.conn_stat;
4149 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4150 conn_stat->port_number,
4151 conn_stat->legacy_device_plug_status,
4152 conn_stat->displayport_device_plug_status,
4153 conn_stat->message_capability_status,
4154 conn_stat->input_port,
4155 conn_stat->peer_device_type);
4156 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4157 const struct drm_dp_resource_status_notify *res_stat =
4158 &up_req->msg.u.resource_stat;
4160 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
4161 res_stat->port_number,
4162 res_stat->available_pbn);
4165 up_req->hdr = mgr->up_req_recv.initial_hdr;
4166 mutex_lock(&mgr->up_req_lock);
4167 list_add_tail(&up_req->next, &mgr->up_req_list);
4168 mutex_unlock(&mgr->up_req_lock);
4169 queue_work(system_long_wq, &mgr->up_req_work);
4172 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4177 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
4178 * @mgr: manager to notify irq for.
4179 * @esi: 4 bytes from SINK_COUNT_ESI
4180 * @handled: whether the hpd interrupt was consumed or not
4182 * This should be called from the driver when it detects a short IRQ,
4183 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
4184 * topology manager will process the sideband messages received as a result
4187 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
4194 if (sc != mgr->sink_count) {
4195 mgr->sink_count = sc;
4199 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
4200 ret = drm_dp_mst_handle_down_rep(mgr);
4204 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4205 ret |= drm_dp_mst_handle_up_req(mgr);
4209 drm_dp_mst_kick_tx(mgr);
4212 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
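/*
 * Sketch of a short-IRQ handler built on this helper (illustrative; real
 * drivers typically loop until the sink stops asserting the IRQ). esi[1]
 * holds DEVICE_SERVICE_IRQ_VECTOR_ESI0 and is written back to ack the bits
 * that were serviced.
 */
static void example_handle_mst_short_irq(struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_dp_aux *aux)
{
	u8 esi[4] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, sizeof(esi)) !=
	    sizeof(esi))
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled)
		drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
				   esi[1]);
}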
4215 * drm_dp_mst_detect_port() - get connection status for an MST port
4216 * @connector: DRM connector for this port
4217 * @ctx: The acquisition context to use for grabbing locks
4218 * @mgr: manager for this port
4219 * @port: pointer to a port
4221 * This returns the current connection state for a port.
4224 drm_dp_mst_detect_port(struct drm_connector *connector,
4225 struct drm_modeset_acquire_ctx *ctx,
4226 struct drm_dp_mst_topology_mgr *mgr,
4227 struct drm_dp_mst_port *port)
4231 /* we need to search for the port in the mgr in case it's gone */
4232 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4234 return connector_status_disconnected;
4236 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4240 ret = connector_status_disconnected;
4245 switch (port->pdt) {
4246 case DP_PEER_DEVICE_NONE:
4248 case DP_PEER_DEVICE_MST_BRANCHING:
4250 ret = connector_status_connected;
4253 case DP_PEER_DEVICE_SST_SINK:
4254 ret = connector_status_connected;
4255 /* for logical ports - cache the EDID */
4256 if (port->port_num >= 8 && !port->cached_edid) {
4257 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4260 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4262 ret = connector_status_connected;
4266 drm_dp_mst_topology_put_port(port);
4269 EXPORT_SYMBOL(drm_dp_mst_detect_port);
4272 * drm_dp_mst_get_edid() - get EDID for an MST port
4273 * @connector: toplevel connector to get EDID for
4274 * @mgr: manager for this port
4275 * @port: unverified pointer to a port.
4277 * This returns an EDID for the port connected to a connector.
4278 * It validates the pointer still exists so the caller doesn't require a reference.
4281 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4283 struct edid *edid = NULL;
4285 /* we need to search for the port in the mgr in case it's gone */
4286 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4290 if (port->cached_edid)
4291 edid = drm_edid_duplicate(port->cached_edid);
4293 edid = drm_get_edid(connector, &port->aux.ddc);
4295 port->has_audio = drm_detect_monitor_audio(edid);
4296 drm_dp_mst_topology_put_port(port);
4299 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4302 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4303 * @mgr: manager to use
4304 * @pbn: payload bandwidth to convert into slots.
4306 * Calculate the number of VCPI slots that will be required for the given PBN
4307 * value. This function is deprecated, and should not be used in atomic
4311 * The total slots required for this port, or error.
4313 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4318 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4320 /* max. time slots - one slot for MTP header */
4325 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
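/*
 * Worked example (illustrative): with mgr->pbn_div == 40 (HBR2 x4), a
 * stream that needs 532 PBN takes DIV_ROUND_UP(532, 40) = 14 of the 63
 * usable time slots (one of the 64 slots carries the MTP header).
 */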
4327 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4328 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4332 /* max. time slots - one slot for MTP header */
4337 vcpi->aligned_pbn = slots * mgr->pbn_div;
4338 vcpi->num_slots = slots;
4340 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4347 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4348 * @state: global atomic state
4349 * @mgr: MST topology manager for the port
4350 * @port: port to find vcpi slots for
4351 * @pbn: bandwidth required for the mode in PBN
4352 * @pbn_div: divider for DSC mode that takes FEC into account
4354 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4355 * may have had. Any atomic drivers which support MST must call this function
4356 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4357 * current VCPI allocation for the new state, but only when
4358 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4359 * to ensure compatibility with userspace applications that still use the
4360 * legacy modesetting UAPI.
4362 * Allocations set by this function are not checked against the bandwidth
4363 * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4365 * Additionally, it is OK to call this function multiple times on the same
4366 * @port as needed. It is not OK however, to call this function and
4367 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4370 * drm_dp_atomic_release_vcpi_slots()
4371 * drm_dp_mst_atomic_check()
4374 * Total slots in the atomic state assigned for this port, or a negative error
4375 * code if the port no longer exists
4377 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4378 struct drm_dp_mst_topology_mgr *mgr,
4379 struct drm_dp_mst_port *port, int pbn,
4382 struct drm_dp_mst_topology_state *topology_state;
4383 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4384 int prev_slots, prev_bw, req_slots;
4386 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4387 if (IS_ERR(topology_state))
4388 return PTR_ERR(topology_state);
4390 /* Find the current allocation for this port, if any */
4391 list_for_each_entry(pos, &topology_state->vcpis, next) {
4392 if (pos->port == port) {
4394 prev_slots = vcpi->vcpi;
4395 prev_bw = vcpi->pbn;
4398 * This should never happen, unless the driver tries
4399 * releasing and allocating the same VCPI allocation,
4402 if (WARN_ON(!prev_slots)) {
4403 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4417 pbn_div = mgr->pbn_div;
4419 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4421 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4422 port->connector->base.id, port->connector->name,
4423 port, prev_slots, req_slots);
4424 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4425 port->connector->base.id, port->connector->name,
4426 port, prev_bw, pbn);
4428 /* Add the new allocation to the state */
4430 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4434 drm_dp_mst_get_port_malloc(port);
4436 list_add(&vcpi->next, &topology_state->vcpis);
4438 vcpi->vcpi = req_slots;
4443 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
4446 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4447 * @state: global atomic state
4448 * @mgr: MST topology manager for the port
4449 * @port: The port to release the VCPI slots from
4451 * Releases any VCPI slots that have been allocated to a port in the atomic
4452 * state. Any atomic drivers which support MST must call this function in
4453 * their &drm_connector_helper_funcs.atomic_check() callback when the
4454 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4455 * removed) but had VCPI allocated in the previous atomic state.
4457 * It is OK to call this even if @port has been removed from the system.
4458 * Additionally, it is OK to call this function multiple times on the same
4459 * @port as needed. It is not OK, however, to call this function and
4460 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4464 * drm_dp_atomic_find_vcpi_slots()
4465 * drm_dp_mst_atomic_check()
4468 * 0 if all slots for this port were added back to
4469 * &drm_dp_mst_topology_state.avail_slots, or a negative error code
4471 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4472 struct drm_dp_mst_topology_mgr *mgr,
4473 struct drm_dp_mst_port *port)
4475 struct drm_dp_mst_topology_state *topology_state;
4476 struct drm_dp_vcpi_allocation *pos;
4479 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4480 if (IS_ERR(topology_state))
4481 return PTR_ERR(topology_state);
4483 list_for_each_entry(pos, &topology_state->vcpis, next) {
4484 if (pos->port == port) {
4489 if (WARN_ON(!found)) {
4490 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4491 port, &topology_state->base);
4495 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4497 drm_dp_mst_put_port_malloc(port);
4504 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
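
/*
 * A minimal usage sketch, assuming a driver's
 * &drm_connector_helper_funcs.atomic_check releasing the VCPI when the
 * connector loses its CRTC. All names are hypothetical placeholders.
 */
static int __maybe_unused
example_mst_connector_atomic_check(struct drm_connector *connector,
				   struct drm_atomic_state *state,
				   struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port)
{
	struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	/* Had a VCPI before, will no longer have one afterwards */
	if (old_conn_state->crtc && !new_conn_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);

	return 0;
}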
4507 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4508 * @mgr: manager for this port
4509 * @port: port to allocate a virtual channel for.
4510 * @pbn: payload bandwidth number to request
4511 * @slots: returned number of slots for this PBN.
4513 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4514 struct drm_dp_mst_port *port, int pbn, int slots)
4521 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4525 if (port->vcpi.vcpi > 0) {
4526 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4527 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4528 if (pbn == port->vcpi.pbn) {
4529 drm_dp_mst_topology_put_port(port);
4534 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4536 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4537 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4538 drm_dp_mst_topology_put_port(port);
4541 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4542 pbn, port->vcpi.num_slots);
4544 /* Keep port allocated until its payload has been removed */
4545 drm_dp_mst_get_port_malloc(port);
4546 drm_dp_mst_topology_put_port(port);
4551 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4553 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4557 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4561 slots = port->vcpi.num_slots;
4562 drm_dp_mst_topology_put_port(port);
4565 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4568 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4569 * @mgr: manager for this port
4570 * @port: unverified pointer to a port.
4572 * This just resets the number of slots for the port's VCPI for later programming.
4574 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4577 * A port with VCPI will remain allocated until its VCPI is
4578 * released, no verified ref needed
4581 port->vcpi.num_slots = 0;
4583 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4586 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4587 * @mgr: manager for this port
4588 * @port: port to deallocate vcpi for
4590 * This can be called unconditionally, regardless of whether
4591 * drm_dp_mst_allocate_vcpi() succeeded or not.
4593 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4594 struct drm_dp_mst_port *port)
4598 if (!port->vcpi.vcpi)
4601 mutex_lock(&mgr->lock);
4602 skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
4603 mutex_unlock(&mgr->lock);
4608 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4609 port->vcpi.num_slots = 0;
4611 port->vcpi.aligned_pbn = 0;
4612 port->vcpi.vcpi = 0;
4613 drm_dp_mst_put_port_malloc(port);
4615 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4617 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4618 int id, struct drm_dp_payload *payload)
4620 u8 payload_alloc[3], status;
4624 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4625 DP_PAYLOAD_TABLE_UPDATED);
4627 payload_alloc[0] = id;
4628 payload_alloc[1] = payload->start_slot;
4629 payload_alloc[2] = payload->num_slots;
4631 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4633 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4638 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4640 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4644 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4647 usleep_range(10000, 20000);
4650 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4659 static int do_get_act_status(struct drm_dp_aux *aux)
4664 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4672 * drm_dp_check_act_status() - Polls for ACT handled status.
4673 * @mgr: manager to use
4675 * Tries waiting for the MST hub to finish updating its payload table by
4676 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really do take that long).
4680 * 0 if the ACT was handled in time, negative error code on failure.
4682 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4685 * There doesn't seem to be any recommended retry count or timeout in
4686 * the MST specification. Since some hubs have been observed to take
4687 * over 1 second to update their payload allocations under certain
4688 * conditions, we use a rather large timeout value.
4690 const int timeout_ms = 3000;
4693 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4694 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4695 200, timeout_ms * USEC_PER_MSEC);
4696 if (ret < 0 && status >= 0) {
4697 DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
4698 timeout_ms, status);
4700 } else if (status < 0) {
4702 * Failure here isn't unexpected - the hub may have
4703 * just been unplugged
4705 DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
4712 EXPORT_SYMBOL(drm_dp_check_act_status);
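
/*
 * A sketch of where the ACT poll sits in the legacy (non-atomic) payload
 * sequence. The ordering relative to actually enabling the stream is
 * driver specific; this only shows the helper call order, and the
 * function name is a hypothetical placeholder.
 */
static int __maybe_unused
example_mst_payload_commit(struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_port *port, int pbn, int slots)
{
	int ret;

	/* Reserve a VCPI and payload ID for the stream */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
		return -EINVAL;

	/* Write the new payload table to the branch device */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret)
		return ret;

	/* Wait for the hub to signal that the table was applied */
	ret = drm_dp_check_act_status(mgr);
	if (ret)
		return ret;

	/* Then send the payload allocation over the sideband channel */
	return drm_dp_update_payload_part2(mgr);
}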
4715 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4716 * @clock: dot clock for the mode
4717 * @bpp: bpp for the mode.
4718 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4720 * This uses the formula in the spec to calculate the PBN value for a mode.
4722 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4725 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4726 * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a common
4727 * multiplier so that PBN comes out as an integer for all link rate/lane
4728 * count combinations
4730 * peak_kbps *= (1006/1000)
4731 * peak_kbps *= (64/54)
4732 * peak_kbps *= 8 convert to bytes
4734 * If the bpp is in units of 1/16, further divide by 16. Put this
4735 * factor in the numerator rather than the denominator, where the extra
 * x16 would overflow the 32-bit constant.
4740 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4741 8 * 54 * 1000 * 1000);
4743 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4744 8 * 54 * 1000 * 1000);
4746 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
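
/*
 * A worked example of the formula above, for a 297 MHz (4k@30) dotclock
 * at 24 bpp, uncompressed:
 *
 *   PBN = DIV_ROUND_UP(297000 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *       = DIV_ROUND_UP(458929152000, 432000000)
 *       = 1063
 *
 * i.e. drm_dp_calc_pbn_mode(297000, 24, false) == 1063.
 */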
4748 /* we want to kick the TX after we've acked the up/down IRQs. */
4749 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4751 queue_work(system_long_wq, &mgr->tx_work);
4754 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4755 struct drm_dp_mst_branch *mstb)
4757 struct drm_dp_mst_port *port;
4758 int tabs = mstb->lct;
4762 for (i = 0; i < tabs; i++)
4766 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4767 list_for_each_entry(port, &mstb->ports, next) {
4768 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
4770 drm_dp_mst_dump_mstb(m, port->mstb);
4774 #define DP_PAYLOAD_TABLE_SIZE 64
4776 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4781 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4782 if (drm_dp_dpcd_read(mgr->aux,
4783 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4790 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4791 struct drm_dp_mst_port *port, char *name,
4794 struct edid *mst_edid;
4796 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4797 drm_edid_get_monitor_name(mst_edid, name, namelen);
4802 * drm_dp_mst_dump_topology(): dump topology to seq file.
4803 * @m: seq_file to dump output to
4804 * @mgr: manager to dump current topology for.
4806 * Helper to dump the MST topology to a seq file for debugfs.
4808 void drm_dp_mst_dump_topology(struct seq_file *m,
4809 struct drm_dp_mst_topology_mgr *mgr)
4812 struct drm_dp_mst_port *port;
4814 mutex_lock(&mgr->lock);
4815 if (mgr->mst_primary)
4816 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4819 mutex_unlock(&mgr->lock);
4821 mutex_lock(&mgr->payload_lock);
4822 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4825 for (i = 0; i < mgr->max_payloads; i++) {
4826 if (mgr->proposed_vcpis[i]) {
4829 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4830 fetch_monitor_name(mgr, port, name, sizeof(name));
4831 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4832 port->port_num, port->vcpi.vcpi,
4833 port->vcpi.num_slots,
4834 (*name != 0) ? name : "Unknown");
4836 seq_printf(m, "vcpi %d:unused\n", i);
4838 for (i = 0; i < mgr->max_payloads; i++) {
4839 seq_printf(m, "payload %d: %d, %d, %d\n",
4841 mgr->payloads[i].payload_state,
4842 mgr->payloads[i].start_slot,
4843 mgr->payloads[i].num_slots);
4847 mutex_unlock(&mgr->payload_lock);
4849 mutex_lock(&mgr->lock);
4850 if (mgr->mst_primary) {
4851 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4854 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4856 seq_printf(m, "dpcd read failed\n");
4859 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4861 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4863 seq_printf(m, "faux/mst read failed\n");
4866 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4868 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4870 seq_printf(m, "mst ctrl read failed\n");
4873 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4875 /* dump the standard OUI branch header */
4876 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4877 if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4878 seq_printf(m, "branch oui read failed\n");
4881 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4883 for (i = 0x3; i < 0x8 && buf[i]; i++)
4884 seq_printf(m, "%c", buf[i]);
4885 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4886 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4887 if (dump_dp_payload_table(mgr, buf))
4888 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4892 mutex_unlock(&mgr->lock);
4895 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
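
/*
 * A sketch of the usual way drivers hook this up: a debugfs file whose
 * seq_file private data points at the topology manager. The show function
 * name and the registration details are hypothetical.
 */
static int __maybe_unused example_mst_topology_show(struct seq_file *m,
						    void *unused)
{
	struct drm_dp_mst_topology_mgr *mgr = m->private;

	drm_dp_mst_dump_topology(m, mgr);
	return 0;
}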
4897 static void drm_dp_tx_work(struct work_struct *work)
4899 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4901 mutex_lock(&mgr->qlock);
4902 if (!list_empty(&mgr->tx_msg_downq))
4903 process_single_down_tx_qlock(mgr);
4904 mutex_unlock(&mgr->qlock);
4908 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4910 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4912 if (port->connector) {
4913 drm_connector_unregister(port->connector);
4914 drm_connector_put(port->connector);
4917 drm_dp_mst_put_port_malloc(port);
4921 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4923 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4924 struct drm_dp_mst_port *port, *port_tmp;
4925 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4926 bool wake_tx = false;
4928 mutex_lock(&mgr->lock);
4929 list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4930 list_del(&port->next);
4931 drm_dp_mst_topology_put_port(port);
4933 mutex_unlock(&mgr->lock);
4935 /* drop any tx slot msg */
4936 mutex_lock(&mstb->mgr->qlock);
4937 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4938 if (txmsg->dst != mstb)
4941 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4942 list_del(&txmsg->next);
4945 mutex_unlock(&mstb->mgr->qlock);
4948 wake_up_all(&mstb->mgr->tx_waitq);
4950 drm_dp_mst_put_mstb_malloc(mstb);
4953 static void drm_dp_delayed_destroy_work(struct work_struct *work)
4955 struct drm_dp_mst_topology_mgr *mgr =
4956 container_of(work, struct drm_dp_mst_topology_mgr,
4957 delayed_destroy_work);
4958 bool send_hotplug = false, go_again;
4961 * Not a regular list traversal as we have to drop the destroy
4962 * connector lock before destroying the mstb/port, to avoid AB->BA
4963 * ordering between this lock and the config mutex.
4969 struct drm_dp_mst_branch *mstb;
4971 mutex_lock(&mgr->delayed_destroy_lock);
4972 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4973 struct drm_dp_mst_branch,
4976 list_del(&mstb->destroy_next);
4977 mutex_unlock(&mgr->delayed_destroy_lock);
4982 drm_dp_delayed_destroy_mstb(mstb);
4987 struct drm_dp_mst_port *port;
4989 mutex_lock(&mgr->delayed_destroy_lock);
4990 port = list_first_entry_or_null(&mgr->destroy_port_list,
4991 struct drm_dp_mst_port,
4994 list_del(&port->next);
4995 mutex_unlock(&mgr->delayed_destroy_lock);
5000 drm_dp_delayed_destroy_port(port);
5001 send_hotplug = true;
5007 drm_kms_helper_hotplug_event(mgr->dev);
5010 static struct drm_private_state *
5011 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
5013 struct drm_dp_mst_topology_state *state, *old_state =
5014 to_dp_mst_topology_state(obj->state);
5015 struct drm_dp_vcpi_allocation *pos, *vcpi;
5017 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
5021 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
5023 INIT_LIST_HEAD(&state->vcpis);
5025 list_for_each_entry(pos, &old_state->vcpis, next) {
5026 /* Prune leftover freed VCPI allocations */
5030 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
5034 drm_dp_mst_get_port_malloc(vcpi->port);
5035 list_add(&vcpi->next, &state->vcpis);
5038 return &state->base;
5041 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
5042 drm_dp_mst_put_port_malloc(pos->port);
5050 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
5051 struct drm_private_state *state)
5053 struct drm_dp_mst_topology_state *mst_state =
5054 to_dp_mst_topology_state(state);
5055 struct drm_dp_vcpi_allocation *pos, *tmp;
5057 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
5058 /* We only keep references to ports with non-zero VCPIs */
5060 drm_dp_mst_put_port_malloc(pos->port);
5067 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5068 struct drm_dp_mst_branch *branch)
5070 while (port->parent) {
5071 if (port->parent == branch)
5074 if (port->parent->port_parent)
5075 port = port->parent->port_parent;
5083 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5084 struct drm_dp_mst_topology_state *state);
5087 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5088 struct drm_dp_mst_topology_state *state)
5090 struct drm_dp_vcpi_allocation *vcpi;
5091 struct drm_dp_mst_port *port;
5092 int pbn_used = 0, ret;
5095 /* Check that we have at least one port in our state that's downstream
5096 * of this branch, otherwise we can skip this branch
5098 list_for_each_entry(vcpi, &state->vcpis, next) {
5100 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
5109 if (mstb->port_parent)
5110 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5111 mstb->port_parent->parent, mstb->port_parent,
5114 DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
5117 list_for_each_entry(port, &mstb->ports, next) {
5118 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
5129 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5130 struct drm_dp_mst_topology_state *state)
5132 struct drm_dp_vcpi_allocation *vcpi;
5135 if (port->pdt == DP_PEER_DEVICE_NONE)
5138 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5141 list_for_each_entry(vcpi, &state->vcpis, next) {
5142 if (vcpi->port != port)
5153 /* This should never happen, as it means we tried to
5154 * set a mode before querying the full_pbn
5156 if (WARN_ON(!port->full_pbn))
5159 pbn_used = vcpi->pbn;
5161 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5167 if (pbn_used > port->full_pbn) {
5168 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5169 port->parent, port, pbn_used,
5174 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5175 port->parent, port, pbn_used, port->full_pbn);
5181 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
5182 struct drm_dp_mst_topology_state *mst_state)
5184 struct drm_dp_vcpi_allocation *vcpi;
5185 int avail_slots = 63, payload_count = 0;
5187 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
5188 /* Releasing VCPI is always OK, even if the port is gone */
5190 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
5195 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
5196 vcpi->port, vcpi->vcpi);
5198 avail_slots -= vcpi->vcpi;
5199 if (avail_slots < 0) {
5200 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
5201 vcpi->port, mst_state,
5202 avail_slots + vcpi->vcpi);
5206 if (++payload_count > mgr->max_payloads) {
5207 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5208 mgr, mst_state, mgr->max_payloads);
5212 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
5213 mgr, mst_state, avail_slots,
5220 * drm_dp_mst_add_affected_dsc_crtcs() - Add DSC-affected CRTCs to the atomic state
5221 * @state: Pointer to the new struct drm_dp_mst_topology_state
5222 * @mgr: MST topology manager
5224 * Whenever there is a change in the MST topology, the DSC configuration
5225 * has to be recalculated, so we need to trigger a modeset on all
5226 * affected CRTCs in that topology.
5230 * drm_dp_mst_atomic_enable_dsc()
5232 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5234 struct drm_dp_mst_topology_state *mst_state;
5235 struct drm_dp_vcpi_allocation *pos;
5236 struct drm_connector *connector;
5237 struct drm_connector_state *conn_state;
5238 struct drm_crtc *crtc;
5239 struct drm_crtc_state *crtc_state;
5241 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5243 if (IS_ERR(mst_state))
5244 return PTR_ERR(mst_state);
5246 list_for_each_entry(pos, &mst_state->vcpis, next) {
5248 connector = pos->port->connector;
5253 conn_state = drm_atomic_get_connector_state(state, connector);
5255 if (IS_ERR(conn_state))
5256 return PTR_ERR(conn_state);
5258 crtc = conn_state->crtc;
5263 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5266 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5268 if (IS_ERR(crtc_state))
5269 return PTR_ERR(crtc_state);
5271 DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5274 crtc_state->mode_changed = true;
5278 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
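
/*
 * A minimal sketch, assuming a driver's &drm_mode_config_funcs.atomic_check
 * calls this once per topology manager whose topology may have changed,
 * before the MST bandwidth check. The wrapper name is hypothetical.
 */
static int __maybe_unused
example_mst_dsc_precheck(struct drm_atomic_state *state,
			 struct drm_dp_mst_topology_mgr *mgr)
{
	/* Force a modeset on every CRTC that may need new DSC parameters */
	return drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
}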
5281 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5282 * @state: Pointer to the new drm_atomic_state
5283 * @port: Pointer to the affected MST Port
5284 * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
5285 * @pbn_div: Divider to calculate correct number of pbn per slot
5286 * @enable: Boolean flag to enable or disable DSC on the port
5288 * This function enables or disables DSC on the given port by
5289 * recalculating its VCPI from the PBN provided, and sets the
5290 * dsc_enabled flag to keep track of which ports have DSC enabled.
5294 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5295 struct drm_dp_mst_port *port,
5296 int pbn, int pbn_div,
5299 struct drm_dp_mst_topology_state *mst_state;
5300 struct drm_dp_vcpi_allocation *pos;
5304 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5306 if (IS_ERR(mst_state))
5307 return PTR_ERR(mst_state);
5309 list_for_each_entry(pos, &mst_state->vcpis, next) {
5310 if (pos->port == port) {
5317 DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5322 if (pos->dsc_enabled == enable) {
5323 DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5324 port, enable, pos->vcpi);
5329 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5330 DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5336 pos->dsc_enabled = enable;
5340 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
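
/*
 * A minimal sketch of enabling DSC from a driver's DSC state computation,
 * assuming @dsc_pbn has already been recalculated for the compressed
 * stream. The wrapper name is a hypothetical placeholder.
 */
static int __maybe_unused
example_mst_enable_dsc(struct drm_atomic_state *state,
		       struct drm_dp_mst_port *port,
		       struct drm_dp_mst_topology_mgr *mgr, int dsc_pbn)
{
	int slots;

	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn,
					     mgr->pbn_div, true);
	return slots < 0 ? slots : 0;
}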
5342 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5343 * atomic update is valid
5344 * @state: Pointer to the new &struct drm_dp_mst_topology_state
5346 * Checks the given topology state for an atomic update to ensure that it's
5347 * valid. This includes checking whether there's enough bandwidth to support
5348 * the new VCPI allocations in the atomic update.
5350 * Any atomic drivers supporting DP MST must make sure to call this after
5351 * checking the rest of their state in their
5352 * &drm_mode_config_funcs.atomic_check() callback.
5355 * drm_dp_atomic_find_vcpi_slots()
5356 * drm_dp_atomic_release_vcpi_slots()
5360 * 0 if the new state is valid, negative error code otherwise.
5362 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5364 struct drm_dp_mst_topology_mgr *mgr;
5365 struct drm_dp_mst_topology_state *mst_state;
5368 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5369 if (!mgr->mst_state)
5372 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5376 mutex_lock(&mgr->lock);
5377 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5379 mutex_unlock(&mgr->lock);
5388 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
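
/*
 * A sketch of the intended call site, assuming a driver's
 * &drm_mode_config_funcs.atomic_check that has no further driver-specific
 * checks of its own. The function name is a hypothetical placeholder.
 */
static int __maybe_unused
example_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Validate VCPI slot counts and branch bandwidth for all MST mgrs */
	return drm_dp_mst_atomic_check(state);
}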
5390 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5391 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5392 .atomic_destroy_state = drm_dp_mst_destroy_state,
5394 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5397 * drm_atomic_get_mst_topology_state: get MST topology state
5399 * @state: global atomic state
5400 * @mgr: MST topology manager, also the private object in this case
5402 * This function wraps drm_atomic_get_private_obj_state(), passing in the
5403 * MST atomic state vtable so that the private object state returned is
5404 * that of an MST topology object. Also, drm_atomic_get_private_obj_state()
5405 * expects the caller to take care of the locking, so we warn if the connection_mutex is not held.
5409 * The MST topology state or error pointer.
5411 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5412 struct drm_dp_mst_topology_mgr *mgr)
5414 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5416 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5419 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5420 * @mgr: manager struct to initialise
5421 * @dev: device providing this structure - for i2c addition.
5422 * @aux: DP helper aux channel to talk to this device
5423 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5424 * @max_payloads: maximum number of payloads this GPU can source
5425 * @conn_base_id: the connector object ID the MST device is connected to.
5427 * Returns 0 on success, or a negative error code on failure
5429 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5430 struct drm_device *dev, struct drm_dp_aux *aux,
5431 int max_dpcd_transaction_bytes,
5432 int max_payloads, int conn_base_id)
5434 struct drm_dp_mst_topology_state *mst_state;
5436 mutex_init(&mgr->lock);
5437 mutex_init(&mgr->qlock);
5438 mutex_init(&mgr->payload_lock);
5439 mutex_init(&mgr->delayed_destroy_lock);
5440 mutex_init(&mgr->up_req_lock);
5441 mutex_init(&mgr->probe_lock);
5442 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5443 mutex_init(&mgr->topology_ref_history_lock);
5445 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5446 INIT_LIST_HEAD(&mgr->destroy_port_list);
5447 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5448 INIT_LIST_HEAD(&mgr->up_req_list);
5451 * delayed_destroy_work will be queued on a dedicated WQ, so that any
5452 * requeued work will also be flushed when deinitializing the topology manager.
5454 mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5455 if (mgr->delayed_destroy_wq == NULL)
5458 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5459 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5460 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5461 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5462 init_waitqueue_head(&mgr->tx_waitq);
5465 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5466 mgr->max_payloads = max_payloads;
5467 mgr->conn_base_id = conn_base_id;
5468 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5469 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5471 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5474 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5475 if (!mgr->proposed_vcpis)
5477 set_bit(0, &mgr->payload_mask);
5479 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5480 if (mst_state == NULL)
5483 mst_state->mgr = mgr;
5484 INIT_LIST_HEAD(&mst_state->vcpis);
5486 drm_atomic_private_obj_init(dev, &mgr->base,
5488 &drm_dp_mst_topology_state_funcs);
5492 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
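
/*
 * A minimal initialization sketch, assuming it runs from a driver's
 * connector/encoder setup code. The transaction size and payload count
 * below are illustrative only; both are hardware specific.
 */
static int __maybe_unused
example_mst_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
		     struct drm_device *dev, struct drm_dp_aux *aux,
		     int conn_base_id)
{
	/* 16 byte DPCD transactions, up to 4 simultaneous payloads */
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 4,
					    conn_base_id);
}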
5495 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5496 * @mgr: manager to destroy
5498 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5500 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5501 flush_work(&mgr->work);
5502 /* The following will also drain any requeued work on the WQ. */
5503 if (mgr->delayed_destroy_wq) {
5504 destroy_workqueue(mgr->delayed_destroy_wq);
5505 mgr->delayed_destroy_wq = NULL;
5507 mutex_lock(&mgr->payload_lock);
5508 kfree(mgr->payloads);
5509 mgr->payloads = NULL;
5510 kfree(mgr->proposed_vcpis);
5511 mgr->proposed_vcpis = NULL;
5512 mutex_unlock(&mgr->payload_lock);
5515 drm_atomic_private_obj_fini(&mgr->base);
5518 mutex_destroy(&mgr->delayed_destroy_lock);
5519 mutex_destroy(&mgr->payload_lock);
5520 mutex_destroy(&mgr->qlock);
5521 mutex_destroy(&mgr->lock);
5522 mutex_destroy(&mgr->up_req_lock);
5523 mutex_destroy(&mgr->probe_lock);
5524 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5525 mutex_destroy(&mgr->topology_ref_history_lock);
5528 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5530 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5534 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5537 for (i = 0; i < num - 1; i++) {
5538 if (msgs[i].flags & I2C_M_RD ||
5543 return msgs[num - 1].flags & I2C_M_RD &&
5544 msgs[num - 1].len <= 0xff;
5547 static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5551 for (i = 0; i < num - 1; i++) {
5552 if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5557 return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
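
/*
 * A sketch of the transfer shape the two checks above accept for a remote
 * read, e.g. an EDID fetch: zero or more address writes followed by exactly
 * one read of at most 255 bytes. The addresses and lengths are illustrative.
 */
static bool __maybe_unused example_remote_i2c_read_shape(void)
{
	static u8 offset;
	static u8 edid[128];
	struct i2c_msg msgs[] = {
		/* writes come first, each without I2C_M_RD */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		/* a single read message must come last */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(edid),
		  .buf = edid },
	};

	return remote_i2c_read_ok(msgs, ARRAY_SIZE(msgs));
}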
5560 static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5561 struct drm_dp_mst_port *port,
5562 struct i2c_msg *msgs, int num)
5564 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5566 struct drm_dp_sideband_msg_req_body msg;
5567 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5570 memset(&msg, 0, sizeof(msg));
5571 msg.req_type = DP_REMOTE_I2C_READ;
5572 msg.u.i2c_read.num_transactions = num - 1;
5573 msg.u.i2c_read.port_number = port->port_num;
5574 for (i = 0; i < num - 1; i++) {
5575 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5576 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5577 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5578 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5580 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5581 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5583 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5590 drm_dp_encode_sideband_req(&msg, txmsg);
5592 drm_dp_queue_down_tx(mgr, txmsg);
5594 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5597 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5601 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5605 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5613 static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5614 struct drm_dp_mst_port *port,
5615 struct i2c_msg *msgs, int num)
5617 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5619 struct drm_dp_sideband_msg_req_body msg;
5620 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5623 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5628 for (i = 0; i < num; i++) {
5629 memset(&msg, 0, sizeof(msg));
5630 msg.req_type = DP_REMOTE_I2C_WRITE;
5631 msg.u.i2c_write.port_number = port->port_num;
5632 msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5633 msg.u.i2c_write.num_bytes = msgs[i].len;
5634 msg.u.i2c_write.bytes = msgs[i].buf;
5636 memset(txmsg, 0, sizeof(*txmsg));
5639 drm_dp_encode_sideband_req(&msg, txmsg);
5640 drm_dp_queue_down_tx(mgr, txmsg);
5642 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5644 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5659 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5660 struct i2c_msg *msgs, int num)
5662 struct drm_dp_aux *aux = adapter->algo_data;
5663 struct drm_dp_mst_port *port =
5664 container_of(aux, struct drm_dp_mst_port, aux);
5665 struct drm_dp_mst_branch *mstb;
5666 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5669 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5673 if (remote_i2c_read_ok(msgs, num)) {
5674 ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5675 } else if (remote_i2c_write_ok(msgs, num)) {
5676 ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5678 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5682 drm_dp_mst_topology_put_mstb(mstb);
5686 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5688 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5689 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5690 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5691 I2C_FUNC_10BIT_ADDR;
5694 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5695 .functionality = drm_dp_mst_i2c_functionality,
5696 .master_xfer = drm_dp_mst_i2c_xfer,
5700 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5701 * @port: The port to add the I2C bus on
5703 * Returns 0 on success or a negative error code on failure.
5705 static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
5707 struct drm_dp_aux *aux = &port->aux;
5708 struct device *parent_dev = port->mgr->dev->dev;
5710 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5711 aux->ddc.algo_data = aux;
5712 aux->ddc.retries = 3;
5714 aux->ddc.class = I2C_CLASS_DDC;
5715 aux->ddc.owner = THIS_MODULE;
5716 /* FIXME: set the kdev of the port's connector as parent */
5717 aux->ddc.dev.parent = parent_dev;
5718 aux->ddc.dev.of_node = parent_dev->of_node;
5720 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
5721 sizeof(aux->ddc.name));
5723 return i2c_add_adapter(&aux->ddc);
5727 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5728 * @port: The port to remove the I2C bus from
5730 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
5732 i2c_del_adapter(&port->aux.ddc);
5736 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5737 * @port: The port to check
5739 * A single physical MST hub object can be represented in the topology
5740 * by multiple branches, with virtual ports between those branches.
5742 * As of DP 1.4, an MST hub with internal (virtual) ports must expose
5743 * certain DPCD registers over those ports. See sections 2.6.1.1.1
5744 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5746 * May acquire mgr->lock
5749 * true if the port is a virtual DP peer device, false otherwise
5751 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5753 struct drm_dp_mst_port *downstream_port;
5755 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5758 /* Virtual DP Sink (Internal Display Panel) */
5759 if (port->port_num >= 8)
5762 /* DP-to-HDMI Protocol Converter */
5763 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5769 mutex_lock(&port->mgr->lock);
5770 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5772 port->mstb->num_ports == 2) {
5773 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5774 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5775 !downstream_port->input) {
5776 mutex_unlock(&port->mgr->lock);
5781 mutex_unlock(&port->mgr->lock);
5787 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5788 * @port: The port to check. A leaf of the MST tree with an attached display.
5790 * Depending on the situation, DSC may be enabled via the endpoint aux,
5791 * the immediately upstream aux, or the connector's physical aux.
5793 * This is both the correct aux to read DSC_CAPABILITY and the
5794 * correct aux to write DSC_ENABLED.
5796 * This operation can be expensive (up to four aux reads), so
5797 * the caller should cache the returned value.
5800 * NULL if DSC cannot be enabled on this port, otherwise the aux device
5802 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5804 struct drm_dp_mst_port *immediate_upstream_port;
5805 struct drm_dp_mst_port *fec_port;
5806 struct drm_dp_desc desc = {};
5813 if (port->parent->port_parent)
5814 immediate_upstream_port = port->parent->port_parent;
5816 immediate_upstream_port = NULL;
5818 fec_port = immediate_upstream_port;
5821 * Each physical link (i.e. not a virtual port) between the
5822 * output and the primary device must support FEC
5824 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5825 !fec_port->fec_capable)
5828 fec_port = fec_port->parent->port_parent;
5831 /* DP-to-DP peer device */
5832 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5835 if (drm_dp_dpcd_read(&port->aux,
5836 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5838 if (drm_dp_dpcd_read(&port->aux,
5839 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5841 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5842 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5845 /* Endpoint decompression with DP-to-DP peer device */
5846 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5847 (endpoint_fec & DP_FEC_CAPABLE) &&
5848 (upstream_dsc & 0x2) /* DSC passthrough */)
5851 /* Virtual DPCD decompression with DP-to-DP peer device */
5852 return &immediate_upstream_port->aux;
5855 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5856 if (drm_dp_mst_is_virtual_dpcd(port))
5861 * Applies to ports for which:
5862 * - Physical aux has Synaptics OUI
5863 * - DPv1.4 or higher
5864 * - Port is on primary branch device
5865 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5867 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5870 if (drm_dp_has_quirk(&desc, 0,
5871 DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5872 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5873 port->parent == port->mgr->mst_primary) {
5876 if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5877 &downstreamport, 1) < 0)
5880 if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5881 ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5882 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5883 return port->mgr->aux;
5887 * The check below verifies whether the MST sink
5888 * connected to the GPU is capable of DSC;
5889 * therefore the endpoint needs to be
5890 * both DSC and FEC capable.
5892 if (drm_dp_dpcd_read(&port->aux,
5893 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5895 if (drm_dp_dpcd_read(&port->aux,
5896 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5898 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5899 (endpoint_fec & DP_FEC_CAPABLE))
5904 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
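
/*
 * A minimal sketch of using the helper above to toggle decompression on
 * the sink. As noted in the kernel-doc, real drivers should cache the
 * returned aux rather than redo the lookup. The wrapper name is a
 * hypothetical placeholder.
 */
static int __maybe_unused
example_mst_set_dsc_decompression(struct drm_dp_mst_port *port, bool enable)
{
	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	int ret;

	if (!dsc_aux)
		return -ENODEV;

	ret = drm_dp_dpcd_writeb(dsc_aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	return ret < 0 ? ret : 0;
}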