/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */

struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

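/*
 * Note: both helpers above are plain bit-serial polynomial division. The
 * header CRC reduces modulo 0x13 (x^4 + x + 1), leaving a 4-bit remainder;
 * the data CRC reduces modulo 0x1d5, leaving the 8-bit remainder that is
 * appended after each message chunk. A caller-side sketch (hypothetical
 * values, just restating what the loops compute):
 *
 *	u8 hdr[2] = { 0x10, 0x01 };
 *	u8 crc = drm_dp_msg_header_crc4(hdr, 3);	// CRC over 3 nibbles
 */
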
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

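/*
 * For reference, the header bytes the encoder above produces (and the
 * decoder below consumes) are laid out as:
 *
 *	byte 0:			[ lct:4 | lcr:4 ]
 *	bytes 1..(lct/2):	RAD, one nibble per hop
 *	next byte:		[ broadcast:1 | path_msg:1 | msg_len:6 ]
 *	final byte:		[ somt:1 | eomt:1 | zero:1 | seqno:1 | crc4:4 ]
 *
 * so a header for lct 1 is 3 bytes long, matching drm_dp_calc_sb_hdr_size().
 */
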
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				/* match the encoder above: no_stop_bit lives at bit 4 */
				tx->no_stop_bit = (buf[idx] >> 4) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					  struct drm_dp_sideband_msg_hdr *hdr,
					  u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;

	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
}

static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
				     int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
				   int port_num,
				   u8 vcpi, uint16_t pbn,
				   u8 number_sdp_streams,
				   u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				   int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

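/*
 * Worked example of the ID bookkeeping above: assuming payload id 0 is
 * reserved by the manager at init time, the first caller finds payload bit 1
 * and vcpi bit 0 free, so it gets vcpi->vcpi = 1 and lands in
 * proposed_vcpis[0]; drm_dp_mst_put_payload_id() below clears exactly those
 * two bits again.
 */
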
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a use case for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver use case for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */

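/*
 * A minimal sketch of the reference pattern described above, as used
 * throughout this file (illustrative only):
 *
 *	if (!drm_dp_mst_topology_try_get_port(port))
 *		return;		// port already left the topology
 *	...operate on the port's topology state...
 *	drm_dp_mst_topology_put_port(port);
 *
 * A malloc reference, by contrast, only keeps the allocation itself alive;
 * it implies nothing about whether the port is still in the topology.
 */
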
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

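/*
 * A sketch of the &drm_dp_mst_topology_cbs.add_connector usage described
 * above; the "foo_" names are hypothetical driver code, not helpers from
 * this file:
 *
 *	static struct drm_connector *
 *	foo_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector = foo_alloc_connector(port);
 *
 *		if (connector)
 *			drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 * with a matching drm_dp_mst_put_port_malloc() in the connector's destroy
 * path once the driver is done with the port.
 */
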
/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		ulong *entries;
		uint nr_entries;
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		DRM_DEBUG("port %p (%d)\n",
			  port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

	topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	topology_ref_history_lock(port->mgr);

	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(port->mgr);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;

	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}

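/*
 * Worked example of the packing above: a port with port_num 2 whose parent
 * branch has lct 2 and rad[0] 0x10 (reached through port 1 of the root)
 * yields idx 0 and shift 0, so rad[0] becomes 0x12 and the returned lct is
 * 3 - one RAD nibble per hop below the primary branch device.
 */
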
static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
{
	switch (pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		return true;
	case DP_PEER_DEVICE_MST_BRANCHING:
		/* For sst branch device */
		if (!mcs)
			return true;

		return false;
	}
	return true;
}

static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
		    bool new_mcs)
{
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	struct drm_dp_mst_branch *mstb;
	u8 rad[8], lct;
	int ret = 0;

	if (port->pdt == new_pdt && port->mcs == new_mcs)
		return 0;

	/* Teardown the old pdt, if there is one */
	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/*
			 * If the new PDT would also have an i2c bus,
			 * don't bother with reregistering it
			 */
			if (new_pdt != DP_PEER_DEVICE_NONE &&
			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
				port->pdt = new_pdt;
				port->mcs = new_mcs;
				return 0;
			}

			/* remove i2c over sideband */
			drm_dp_mst_unregister_i2c_bus(&port->aux);
		} else {
			mutex_lock(&mgr->lock);
			drm_dp_mst_topology_put_mstb(port->mstb);
			port->mstb = NULL;
			mutex_unlock(&mgr->lock);
		}
	}

	port->pdt = new_pdt;
	port->mcs = new_mcs;

	if (port->pdt != DP_PEER_DEVICE_NONE) {
		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
			/* add i2c over sideband */
			ret = drm_dp_mst_register_i2c_bus(&port->aux);
		} else {
			lct = drm_dp_calculate_rad(port, rad);
			mstb = drm_dp_add_mst_branch_device(lct, rad);
			if (!mstb) {
				ret = -ENOMEM;
				DRM_ERROR("Failed to create MSTB for port %p",
					  port);
				goto out;
			}

			mutex_lock(&mgr->lock);
			port->mstb = mstb;
			mstb->mgr = port->mgr;
			mstb->port_parent = port;

			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);
			mutex_unlock(&mgr->lock);

			/* And make sure we send a link address for this */
			ret = 1;
		}
	}

out:
	if (ret < 0)
		port->pdt = DP_PEER_DEVICE_NONE;
	return ret;
}

/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_read(port->mgr, port,
				     offset, size, buffer);
}

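/*
 * Example (hypothetical caller): reading one byte of a remote sink's DPCD
 * through the fake AUX channel embedded in the port:
 *
 *	u8 rev;
 *
 *	if (drm_dp_mst_dpcd_read(&port->aux, DP_DPCD_REV, &rev, 1) == 1)
 *		...rev now holds the remote sink's DPCD revision...
 */
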
/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: number of bytes written on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size)
{
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
						    aux);

	return drm_dp_send_dpcd_write(port->mgr, port,
				      offset, size, buffer);
}

static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret = 0;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     mstb->port_parent,
						     DP_GUID, 16, mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(mstb->mgr->aux,
						DP_GUID, mstb->guid, 16);
		}
	}

	if (ret < 16 && ret > 0)
		return -EPROTO;

	return ret == 16 ? 0 : ret;
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];

	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}

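/*
 * For example, with conn_base_id 4, a branch at lct 2 with rad[0] 0x10 and
 * pnum 8, the loop above walks one hop ("-1") and then appends the port
 * itself ("-8"), producing the connector property path "mst:4-1-8".
 */
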
/**
 * drm_dp_mst_connector_late_register() - Late MST connector registration
 * @connector: The MST connector
 * @port: The MST port for this connector
 *
 * Helper to register the remote aux device for this MST port. Drivers should
 * call this from their mst connector's late_register hook to enable MST aux
 * devnodes.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port)
{
	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
		      port->aux.name, connector->kdev->kobj.name);

	port->aux.dev = connector->kdev;
	return drm_dp_aux_register_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_late_register);

2133 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2134 * @connector: The MST connector
2135 * @port: The MST port for this connector
2137 * Helper to unregister the remote aux device for this MST port, registered by
2138 * drm_dp_mst_connector_late_register(). Drivers should call this from their
2139 * MST connector's early_unregister hook.
2141 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2142 struct drm_dp_mst_port *port)
2144 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2145 port->aux.name, connector->kdev->kobj.name);
2146 drm_dp_aux_unregister_devnode(&port->aux);
2148 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
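/*
 * Example (illustrative sketch): wiring the two helpers above into a driver's
 * &drm_connector_funcs. The my_mst_connector container and its accessor are
 * hypothetical; only the drm_dp_mst_connector_*() calls are from this file.
 */
struct my_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_port *port;
};

#define to_my_mst_connector(c) \
	container_of(c, struct my_mst_connector, base)

static int my_mst_connector_late_register(struct drm_connector *connector)
{
	return drm_dp_mst_connector_late_register(connector,
						  to_my_mst_connector(connector)->port);
}

static void my_mst_connector_early_unregister(struct drm_connector *connector)
{
	drm_dp_mst_connector_early_unregister(connector,
					      to_my_mst_connector(connector)->port);
}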
2151 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2152 struct drm_dp_mst_port *port)
2154 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2158 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2159 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2160 if (!port->connector) {
2165 if (port->pdt != DP_PEER_DEVICE_NONE &&
2166 drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2167 port->cached_edid = drm_get_edid(port->connector,
2169 drm_connector_set_tile_property(port->connector);
2172 drm_connector_register(port->connector);
2176 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2180 * Drop a topology reference, and unlink the port from the in-memory topology
2184 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2185 struct drm_dp_mst_port *port)
2187 mutex_lock(&mgr->lock);
2188 port->parent->num_ports--;
2189 list_del(&port->next);
2190 mutex_unlock(&mgr->lock);
2191 drm_dp_mst_topology_put_port(port);
2194 static struct drm_dp_mst_port *
2195 drm_dp_mst_add_port(struct drm_device *dev,
2196 struct drm_dp_mst_topology_mgr *mgr,
2197 struct drm_dp_mst_branch *mstb, u8 port_number)
2199 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2204 kref_init(&port->topology_kref);
2205 kref_init(&port->malloc_kref);
2206 port->parent = mstb;
2207 port->port_num = port_number;
2209 port->aux.name = "DPMST";
2210 port->aux.dev = dev->dev;
2211 port->aux.is_remote = true;
2213 /* initialize the MST downstream port's AUX crc work queue */
2214 drm_dp_remote_aux_init(&port->aux);
2217 * Make sure the memory allocation for our parent branch stays
2218 * around until our own memory allocation is released
2220 drm_dp_mst_get_mstb_malloc(mstb);
2226 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2227 struct drm_device *dev,
2228 struct drm_dp_link_addr_reply_port *port_msg)
2230 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2231 struct drm_dp_mst_port *port;
2232 int old_ddps = 0, ret;
2233 u8 new_pdt = DP_PEER_DEVICE_NONE;
2235 bool created = false, send_link_addr = false, changed = false;
2237 port = drm_dp_get_port(mstb, port_msg->port_number);
2239 port = drm_dp_mst_add_port(dev, mgr, mstb,
2240 port_msg->port_number);
2245 } else if (!port->input && port_msg->input_port && port->connector) {
2246 /* Since port->connector can't be changed here, we create a
2247 * new port if input_port changes from 0 to 1
2249 drm_dp_mst_topology_unlink_port(mgr, port);
2250 drm_dp_mst_topology_put_port(port);
2251 port = drm_dp_mst_add_port(dev, mgr, mstb,
2252 port_msg->port_number);
2257 } else if (port->input && !port_msg->input_port) {
2259 } else if (port->connector) {
2260 /* We're updating a port that's exposed to userspace, so do it under lock */
2263 drm_modeset_lock(&mgr->base.lock, NULL);
2265 old_ddps = port->ddps;
2266 changed = port->ddps != port_msg->ddps ||
2268 (port->ldps != port_msg->legacy_device_plug_status ||
2269 port->dpcd_rev != port_msg->dpcd_revision ||
2270 port->mcs != port_msg->mcs ||
2271 port->pdt != port_msg->peer_device_type ||
2272 port->num_sdp_stream_sinks !=
2273 port_msg->num_sdp_stream_sinks));
2276 port->input = port_msg->input_port;
2278 new_pdt = port_msg->peer_device_type;
2279 new_mcs = port_msg->mcs;
2280 port->ddps = port_msg->ddps;
2281 port->ldps = port_msg->legacy_device_plug_status;
2282 port->dpcd_rev = port_msg->dpcd_revision;
2283 port->num_sdp_streams = port_msg->num_sdp_streams;
2284 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2286 /* manage mstb port lists with mgr lock - take a reference for this port */
2289 mutex_lock(&mgr->lock);
2290 drm_dp_mst_topology_get_port(port);
2291 list_add(&port->next, &mstb->ports);
2293 mutex_unlock(&mgr->lock);
2297 * Reprobe PBN caps on both hotplug and when re-probing the link
2298 * for our parent mstb
2300 if (old_ddps != port->ddps || !created) {
2301 if (port->ddps && !port->input) {
2302 ret = drm_dp_send_enum_path_resources(mgr, mstb,
2311 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2313 send_link_addr = true;
2314 } else if (ret < 0) {
2315 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2321 * If this port wasn't just created, then we're reprobing because
2322 * we're coming out of suspend. In this case, always resend the link
2323 * address if there's an MSTB on this port
2325 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2327 send_link_addr = true;
2329 if (port->connector)
2330 drm_modeset_unlock(&mgr->base.lock);
2331 else if (!port->input)
2332 drm_dp_mst_port_add_connector(mstb, port);
2334 if (send_link_addr && port->mstb) {
2335 ret = drm_dp_send_link_address(mgr, port->mstb);
2336 if (ret == 1) /* MSTB below us changed */
2342 /* put reference to this port */
2343 drm_dp_mst_topology_put_port(port);
2347 drm_dp_mst_topology_unlink_port(mgr, port);
2348 if (port->connector)
2349 drm_modeset_unlock(&mgr->base.lock);
2351 drm_dp_mst_topology_put_port(port);
2356 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2357 struct drm_dp_connection_status_notify *conn_stat)
2359 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2360 struct drm_dp_mst_port *port;
2361 int old_ddps, old_input, ret, i;
2364 bool dowork = false, create_connector = false;
2366 port = drm_dp_get_port(mstb, conn_stat->port_number);
2370 if (port->connector) {
2371 if (!port->input && conn_stat->input_port) {
2373 * We can't remove a connector from an already exposed
2374 * port, so just throw the port out and make sure we
2375 * reprobe the link address of its parent MSTB
2377 drm_dp_mst_topology_unlink_port(mgr, port);
2378 mstb->link_address_sent = false;
2383 /* Locking is only needed if the port's exposed to userspace */
2384 drm_modeset_lock(&mgr->base.lock, NULL);
2385 } else if (port->input && !conn_stat->input_port) {
2386 create_connector = true;
2387 /* Reprobe link address so we get num_sdp_streams */
2388 mstb->link_address_sent = false;
2392 old_ddps = port->ddps;
2393 old_input = port->input;
2394 port->input = conn_stat->input_port;
2395 port->ldps = conn_stat->legacy_device_plug_status;
2396 port->ddps = conn_stat->displayport_device_plug_status;
2398 if (old_ddps != port->ddps) {
2399 if (port->ddps && !port->input)
2400 drm_dp_send_enum_path_resources(mgr, mstb, port);
2405 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2406 new_mcs = conn_stat->message_capability_status;
2407 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2410 } else if (ret < 0) {
2411 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2416 if (!old_input && old_ddps != port->ddps && !port->ddps) {
2417 for (i = 0; i < mgr->max_payloads; i++) {
2418 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2419 struct drm_dp_mst_port *port_validated;
2425 container_of(vcpi, struct drm_dp_mst_port, vcpi);
2427 drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2428 if (!port_validated) {
2429 mutex_lock(&mgr->payload_lock);
2430 vcpi->num_slots = 0;
2431 mutex_unlock(&mgr->payload_lock);
2433 drm_dp_mst_topology_put_port(port_validated);
2438 if (port->connector)
2439 drm_modeset_unlock(&mgr->base.lock);
2440 else if (create_connector)
2441 drm_dp_mst_port_add_connector(mstb, port);
2444 drm_dp_mst_topology_put_port(port);
2446 queue_work(system_long_wq, &mstb->mgr->work);
2449 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2452 struct drm_dp_mst_branch *mstb;
2453 struct drm_dp_mst_port *port;
2455 /* find the port by iterating down */
2457 mutex_lock(&mgr->lock);
2458 mstb = mgr->mst_primary;
2463 for (i = 0; i < lct - 1; i++) {
2464 int shift = (i % 2) ? 0 : 4;
2465 int port_num = (rad[i / 2] >> shift) & 0xf;
2467 list_for_each_entry(port, &mstb->ports, next) {
2468 if (port->port_num == port_num) {
2471 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2479 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2483 mutex_unlock(&mgr->lock);
2487 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2488 struct drm_dp_mst_branch *mstb,
2489 const uint8_t *guid)
2491 struct drm_dp_mst_branch *found_mstb;
2492 struct drm_dp_mst_port *port;
2494 if (memcmp(mstb->guid, guid, 16) == 0)
2498 list_for_each_entry(port, &mstb->ports, next) {
2502 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2511 static struct drm_dp_mst_branch *
2512 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2513 const uint8_t *guid)
2515 struct drm_dp_mst_branch *mstb;
2518 /* find the port by iterating down */
2519 mutex_lock(&mgr->lock);
2521 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2523 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2528 mutex_unlock(&mgr->lock);
2532 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2533 struct drm_dp_mst_branch *mstb)
2535 struct drm_dp_mst_port *port;
2537 bool changed = false;
2539 if (!mstb->link_address_sent) {
2540 ret = drm_dp_send_link_address(mgr, mstb);
2547 list_for_each_entry(port, &mstb->ports, next) {
2548 struct drm_dp_mst_branch *mstb_child = NULL;
2550 if (port->input || !port->ddps)
2554 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2558 ret = drm_dp_check_and_send_link_address(mgr,
2560 drm_dp_mst_topology_put_mstb(mstb_child);
2571 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2573 struct drm_dp_mst_topology_mgr *mgr =
2574 container_of(work, struct drm_dp_mst_topology_mgr, work);
2575 struct drm_device *dev = mgr->dev;
2576 struct drm_dp_mst_branch *mstb;
2578 bool clear_payload_id_table;
2580 mutex_lock(&mgr->probe_lock);
2582 mutex_lock(&mgr->lock);
2583 clear_payload_id_table = !mgr->payload_id_table_cleared;
2584 mgr->payload_id_table_cleared = true;
2586 mstb = mgr->mst_primary;
2588 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2592 mutex_unlock(&mgr->lock);
2594 mutex_unlock(&mgr->probe_lock);
2599 * Certain branch devices seem to incorrectly report an available_pbn
2600 * of 0 on downstream sinks, even after clearing the
2601 * DP_PAYLOAD_ALLOCATE_* registers in
2602 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2603 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2604 * things work again.
2606 if (clear_payload_id_table) {
2607 DRM_DEBUG_KMS("Clearing payload ID table\n");
2608 drm_dp_send_clear_payload_id_table(mgr, mstb);
2611 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2612 drm_dp_mst_topology_put_mstb(mstb);
2614 mutex_unlock(&mgr->probe_lock);
2616 drm_kms_helper_hotplug_event(dev);
2619 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2624 if (memchr_inv(guid, 0, 16))
2627 salt = get_jiffies_64();
2629 memcpy(&guid[0], &salt, sizeof(u64));
2630 memcpy(&guid[8], &salt, sizeof(u64));
2635 static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2636 u8 port_num, u32 offset, u8 num_bytes)
2638 struct drm_dp_sideband_msg_req_body req;
2640 req.req_type = DP_REMOTE_DPCD_READ;
2641 req.u.dpcd_read.port_number = port_num;
2642 req.u.dpcd_read.dpcd_address = offset;
2643 req.u.dpcd_read.num_bytes = num_bytes;
2644 drm_dp_encode_sideband_req(&req, msg);
2647 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2648 bool up, u8 *msg, int len)
2651 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2652 int tosend, total, offset;
2659 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2661 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2664 if (ret != tosend) {
2665 if (ret == -EIO && retries < 5) {
2669 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2675 } while (total > 0);
2679 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2680 struct drm_dp_sideband_msg_tx *txmsg)
2682 struct drm_dp_mst_branch *mstb = txmsg->dst;
2685 /* both msg slots are full */
2686 if (txmsg->seqno == -1) {
2687 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2688 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2691 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2692 txmsg->seqno = mstb->last_seqno;
2693 mstb->last_seqno ^= 1;
2694 } else if (mstb->tx_slots[0] == NULL)
2698 mstb->tx_slots[txmsg->seqno] = txmsg;
2701 req_type = txmsg->msg[0] & 0x7f;
2702 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2703 req_type == DP_RESOURCE_STATUS_NOTIFY)
2707 hdr->path_msg = txmsg->path_msg;
2708 hdr->lct = mstb->lct;
2709 hdr->lcr = mstb->lct - 1;
2711 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2712 hdr->seqno = txmsg->seqno;
2716 * process a single block of the next message in the sideband queue
2718 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2719 struct drm_dp_sideband_msg_tx *txmsg,
2723 struct drm_dp_sideband_msg_hdr hdr;
2724 int len, space, idx, tosend;
2727 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2729 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2731 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2734 /* make hdr from dst mst; for replies use the existing seqno, otherwise assign one */
2736 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2740 /* amount left to send in this message */
2741 len = txmsg->cur_len - txmsg->cur_offset;
2743 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2744 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2746 tosend = min(len, space);
2747 if (len == txmsg->cur_len)
2753 hdr.msg_len = tosend + 1;
2754 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2755 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2756 /* add crc at end */
2757 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2760 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2761 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2762 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2764 drm_printf(&p, "sideband msg failed to send\n");
2765 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2769 txmsg->cur_offset += tosend;
2770 if (txmsg->cur_offset == txmsg->cur_len) {
2771 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2777 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2779 struct drm_dp_sideband_msg_tx *txmsg;
2782 WARN_ON(!mutex_is_locked(&mgr->qlock));
2784 /* construct a chunk from the first msg in the tx_msg queue */
2785 if (list_empty(&mgr->tx_msg_downq))
2788 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2789 ret = process_single_tx_qlock(mgr, txmsg, false);
2791 /* txmsg was sent; it should be in the slots now */
2792 list_del(&txmsg->next);
2794 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2795 list_del(&txmsg->next);
2796 if (txmsg->seqno != -1)
2797 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2798 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2799 wake_up_all(&mgr->tx_waitq);
2803 /* called holding qlock */
2804 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2805 struct drm_dp_sideband_msg_tx *txmsg)
2809 /* construct a chunk from the first msg in the tx_msg queue */
2810 ret = process_single_tx_qlock(mgr, txmsg, true);
2813 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2815 if (txmsg->seqno != -1) {
2816 WARN_ON((unsigned int)txmsg->seqno >
2817 ARRAY_SIZE(txmsg->dst->tx_slots));
2818 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2822 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2823 struct drm_dp_sideband_msg_tx *txmsg)
2825 mutex_lock(&mgr->qlock);
2826 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2828 if (drm_debug_enabled(DRM_UT_DP)) {
2829 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2831 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2834 if (list_is_singular(&mgr->tx_msg_downq))
2835 process_single_down_tx_qlock(mgr);
2836 mutex_unlock(&mgr->qlock);
2840 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2842 struct drm_dp_link_addr_reply_port *port_reply;
2845 for (i = 0; i < reply->nports; i++) {
2846 port_reply = &reply->ports[i];
2847 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2849 port_reply->input_port,
2850 port_reply->peer_device_type,
2851 port_reply->port_number,
2852 port_reply->dpcd_revision,
2855 port_reply->legacy_device_plug_status,
2856 port_reply->num_sdp_streams,
2857 port_reply->num_sdp_stream_sinks);
2861 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2862 struct drm_dp_mst_branch *mstb)
2864 struct drm_dp_sideband_msg_tx *txmsg;
2865 struct drm_dp_link_address_ack_reply *reply;
2866 struct drm_dp_mst_port *port, *tmp;
2867 int i, ret, port_mask = 0;
2868 bool changed = false;
2870 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2875 build_link_address(txmsg);
2877 mstb->link_address_sent = true;
2878 drm_dp_queue_down_tx(mgr, txmsg);
2880 /* FIXME: Actually do some real error handling here */
2881 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2883 DRM_ERROR("Sending link address failed with %d\n", ret);
2886 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2887 DRM_ERROR("link address NAK received\n");
2892 reply = &txmsg->reply.u.link_addr;
2893 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2894 drm_dp_dump_link_address(reply);
2896 ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2900 drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2901 DRM_ERROR("GUID check on %s failed: %d\n",
2906 for (i = 0; i < reply->nports; i++) {
2907 port_mask |= BIT(reply->ports[i].port_number);
2908 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2916 /* Prune any ports that are currently a part of mstb in our in-memory
2917 * topology, but were not seen in this link address. Usually this
2918 * means that they were removed while the topology was out of sync,
2919 * e.g. during suspend/resume
2921 mutex_lock(&mgr->lock);
2922 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2923 if (port_mask & BIT(port->port_num))
2926 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2928 list_del(&port->next);
2929 drm_dp_mst_topology_put_port(port);
2932 mutex_unlock(&mgr->lock);
2936 mstb->link_address_sent = false;
2938 return ret < 0 ? ret : changed;
2941 void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2942 struct drm_dp_mst_branch *mstb)
2944 struct drm_dp_sideband_msg_tx *txmsg;
2947 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2952 build_clear_payload_id_table(txmsg);
2954 drm_dp_queue_down_tx(mgr, txmsg);
2956 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2957 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2958 DRM_DEBUG_KMS("clear payload table id nak received\n");
2964 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2965 struct drm_dp_mst_branch *mstb,
2966 struct drm_dp_mst_port *port)
2968 struct drm_dp_enum_path_resources_ack_reply *path_res;
2969 struct drm_dp_sideband_msg_tx *txmsg;
2972 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2977 build_enum_path_resources(txmsg, port->port_num);
2979 drm_dp_queue_down_tx(mgr, txmsg);
2981 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2984 path_res = &txmsg->reply.u.path_resources;
2986 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2987 DRM_DEBUG_KMS("enum path resources nak received\n");
2989 if (port->port_num != path_res->port_number)
2990 DRM_ERROR("got incorrect port in response\n");
2992 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2993 path_res->port_number,
2994 path_res->full_payload_bw_number,
2995 path_res->avail_payload_bw_number);
2998 * If something changed, make sure we send a hotplug event
3001 if (port->full_pbn != path_res->full_payload_bw_number ||
3002 port->fec_capable != path_res->fec_capable)
3005 port->full_pbn = path_res->full_payload_bw_number;
3006 port->fec_capable = path_res->fec_capable;
3014 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3016 if (!mstb->port_parent)
3019 if (mstb->port_parent->mstb != mstb)
3020 return mstb->port_parent;
3022 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3026 * Searches upwards in the topology starting from mstb to try to find the
3027 * closest available parent of mstb that's still connected to the rest of the
3028 * topology. This can be used in order to perform operations like releasing
3029 * payloads, where the branch device which owned the payload may no longer be
3030 * around and thus would require that the payload on the last living relative be pulled instead.
3033 static struct drm_dp_mst_branch *
3034 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3035 struct drm_dp_mst_branch *mstb,
3038 struct drm_dp_mst_branch *rmstb = NULL;
3039 struct drm_dp_mst_port *found_port;
3041 mutex_lock(&mgr->lock);
3042 if (!mgr->mst_primary)
3046 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3050 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3051 rmstb = found_port->parent;
3052 *port_num = found_port->port_num;
3054 /* Search again, starting from this parent */
3055 mstb = found_port->parent;
3059 mutex_unlock(&mgr->lock);
3063 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3064 struct drm_dp_mst_port *port,
3068 struct drm_dp_sideband_msg_tx *txmsg;
3069 struct drm_dp_mst_branch *mstb;
3071 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3074 port_num = port->port_num;
3075 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3077 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3085 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3091 for (i = 0; i < port->num_sdp_streams; i++)
3095 build_allocate_payload(txmsg, port_num,
3097 pbn, port->num_sdp_streams, sinks);
3099 drm_dp_queue_down_tx(mgr, txmsg);
3102 * FIXME: there is a small chance that between getting the last
3103 * connected mstb and sending the payload message, the last connected
3104 * mstb could also be removed from the topology. In the future, this
3105 * needs to be fixed by restarting the
3106 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3107 * timeout if the topology is still connected to the system.
3109 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3111 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3118 drm_dp_mst_topology_put_mstb(mstb);
3122 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3123 struct drm_dp_mst_port *port, bool power_up)
3125 struct drm_dp_sideband_msg_tx *txmsg;
3128 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3132 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3134 drm_dp_mst_topology_put_port(port);
3138 txmsg->dst = port->parent;
3139 build_power_updown_phy(txmsg, port->port_num, power_up);
3140 drm_dp_queue_down_tx(mgr, txmsg);
3142 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3144 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3150 drm_dp_mst_topology_put_port(port);
3154 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3156 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3158 struct drm_dp_payload *payload)
3162 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3164 payload->payload_state = 0;
3167 payload->payload_state = DP_PAYLOAD_LOCAL;
3171 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3172 struct drm_dp_mst_port *port,
3174 struct drm_dp_payload *payload)
3177 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3180 payload->payload_state = DP_PAYLOAD_REMOTE;
3184 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3185 struct drm_dp_mst_port *port,
3187 struct drm_dp_payload *payload)
3189 DRM_DEBUG_KMS("\n");
3190 /* it's okay for these to fail */
3192 drm_dp_payload_send_msg(mgr, port, id, 0);
3195 drm_dp_dpcd_write_payload(mgr, id, payload);
3196 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3200 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3202 struct drm_dp_payload *payload)
3204 payload->payload_state = 0;
3209 * drm_dp_update_payload_part1() - Execute payload update part 1
3210 * @mgr: manager to use.
3212 * This iterates over all proposed virtual channels, and tries to
3213 * allocate space in the link for them. For 0->slots transitions,
3214 * this step just writes the VCPI to the MST device. For slots->0
3215 * transitions, this writes the updated VCPIs and removes the
3216 * remote VC payloads.
3218 * After calling this the driver should generate ACT and payload packets.
3221 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3223 struct drm_dp_payload req_payload;
3224 struct drm_dp_mst_port *port;
3228 mutex_lock(&mgr->payload_lock);
3229 for (i = 0; i < mgr->max_payloads; i++) {
3230 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3231 struct drm_dp_payload *payload = &mgr->payloads[i];
3232 bool put_port = false;
3234 /* solve the current payloads: compare to the hw ones and
3235 * update the hw view */
3236 req_payload.start_slot = cur_slots;
3238 port = container_of(vcpi, struct drm_dp_mst_port,
3241 /* Validated ports don't matter if we're releasing
3244 if (vcpi->num_slots) {
3245 port = drm_dp_mst_topology_get_port_validated(
3248 mutex_unlock(&mgr->payload_lock);
3254 req_payload.num_slots = vcpi->num_slots;
3255 req_payload.vcpi = vcpi->vcpi;
3258 req_payload.num_slots = 0;
3261 payload->start_slot = req_payload.start_slot;
3262 /* work out what is required to happen with this payload */
3263 if (payload->num_slots != req_payload.num_slots) {
3265 /* need to push an update for this payload */
3266 if (req_payload.num_slots) {
3267 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3269 payload->num_slots = req_payload.num_slots;
3270 payload->vcpi = req_payload.vcpi;
3272 } else if (payload->num_slots) {
3273 payload->num_slots = 0;
3274 drm_dp_destroy_payload_step1(mgr, port,
3277 req_payload.payload_state =
3278 payload->payload_state;
3279 payload->start_slot = 0;
3281 payload->payload_state = req_payload.payload_state;
3283 cur_slots += req_payload.num_slots;
3286 drm_dp_mst_topology_put_port(port);
3289 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3290 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3295 DRM_DEBUG_KMS("removing payload %d\n", i);
3296 for (j = i; j < mgr->max_payloads - 1; j++) {
3297 mgr->payloads[j] = mgr->payloads[j + 1];
3298 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3300 if (mgr->proposed_vcpis[j] &&
3301 mgr->proposed_vcpis[j]->num_slots) {
3302 set_bit(j + 1, &mgr->payload_mask);
3304 clear_bit(j + 1, &mgr->payload_mask);
3308 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3309 sizeof(struct drm_dp_payload));
3310 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3311 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3313 mutex_unlock(&mgr->payload_lock);
3317 EXPORT_SYMBOL(drm_dp_update_payload_part1);
3320 * drm_dp_update_payload_part2() - Execute payload update part 2
3321 * @mgr: manager to use.
3323 * This iterates over all proposed virtual channels, and tries to
3324 * allocate space in the link for them. For 0->slots transitions,
3325 * this step writes the remote VC payload commands. For slots->0
3326 * this just resets some internal state.
3328 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3330 struct drm_dp_mst_port *port;
3333 mutex_lock(&mgr->payload_lock);
3334 for (i = 0; i < mgr->max_payloads; i++) {
3336 if (!mgr->proposed_vcpis[i])
3339 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3341 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3342 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3343 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3344 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3345 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3348 mutex_unlock(&mgr->payload_lock);
3352 mutex_unlock(&mgr->payload_lock);
3355 EXPORT_SYMBOL(drm_dp_update_payload_part2);
3357 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3358 struct drm_dp_mst_port *port,
3359 int offset, int size, u8 *bytes)
3362 struct drm_dp_sideband_msg_tx *txmsg;
3363 struct drm_dp_mst_branch *mstb;
3365 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3369 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3375 build_dpcd_read(txmsg, port->port_num, offset, size);
3376 txmsg->dst = port->parent;
3378 drm_dp_queue_down_tx(mgr, txmsg);
3380 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3384 /* DPCD read should never be NAKed */
3385 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3386 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3387 mstb, port->port_num, offset, size);
3392 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3397 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3399 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3404 drm_dp_mst_topology_put_mstb(mstb);
3409 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3410 struct drm_dp_mst_port *port,
3411 int offset, int size, u8 *bytes)
3414 struct drm_dp_sideband_msg_tx *txmsg;
3415 struct drm_dp_mst_branch *mstb;
3417 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3421 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3427 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3430 drm_dp_queue_down_tx(mgr, txmsg);
3432 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3433 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3438 drm_dp_mst_topology_put_mstb(mstb);
3442 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3444 struct drm_dp_sideband_msg_reply_body reply;
3446 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3447 reply.req_type = req_type;
3448 drm_dp_encode_sideband_reply(&reply, msg);
3452 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3453 struct drm_dp_mst_branch *mstb,
3454 int req_type, int seqno, bool broadcast)
3456 struct drm_dp_sideband_msg_tx *txmsg;
3458 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3463 txmsg->seqno = seqno;
3464 drm_dp_encode_up_ack_reply(txmsg, req_type);
3466 mutex_lock(&mgr->qlock);
3468 process_single_up_tx_qlock(mgr, txmsg);
3470 mutex_unlock(&mgr->qlock);
3476 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3478 if (dp_link_bw == 0 || dp_link_count == 0)
3479 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3480 dp_link_bw, dp_link_count);
3482 return dp_link_bw * dp_link_count / 2;
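/*
 * Worked example: an HBR2 link (5.4 Gbps per lane, so dp_link_bw 0x14 == 20)
 * trained at 4 lanes gives 20 * 4 / 2 = 40, i.e. every MTP time slot carries
 * 40 PBN worth of bandwidth on that link.
 */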
3486 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3487 * @mgr: manager to set state for
3488 * @mst_state: true to enable MST on this connector - false to disable.
3490 * This is called by the driver when it detects an MST capable device plugged
3491 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3493 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3496 struct drm_dp_mst_branch *mstb = NULL;
3498 mutex_lock(&mgr->payload_lock);
3499 mutex_lock(&mgr->lock);
3500 if (mst_state == mgr->mst_state)
3503 mgr->mst_state = mst_state;
3504 /* set the device into MST mode */
3506 struct drm_dp_payload reset_pay;
3508 WARN_ON(mgr->mst_primary);
3511 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3512 if (ret != DP_RECEIVER_CAP_SIZE) {
3513 DRM_DEBUG_KMS("failed to read DPCD\n");
3517 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3518 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3519 if (mgr->pbn_div == 0) {
3524 /* add initial branch device at LCT 1 */
3525 mstb = drm_dp_add_mst_branch_device(1, NULL);
3532 /* give this the main reference */
3533 mgr->mst_primary = mstb;
3534 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3536 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3539 DP_UPSTREAM_IS_SRC);
3543 reset_pay.start_slot = 0;
3544 reset_pay.num_slots = 0x3f;
3545 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3547 queue_work(system_long_wq, &mgr->work);
3551 /* disable MST on the device */
3552 mstb = mgr->mst_primary;
3553 mgr->mst_primary = NULL;
3554 /* this can fail if the device is gone */
3555 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3557 memset(mgr->payloads, 0,
3558 mgr->max_payloads * sizeof(mgr->payloads[0]));
3559 memset(mgr->proposed_vcpis, 0,
3560 mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
3561 mgr->payload_mask = 0;
3562 set_bit(0, &mgr->payload_mask);
3564 mgr->payload_id_table_cleared = false;
3568 mutex_unlock(&mgr->lock);
3569 mutex_unlock(&mgr->payload_lock);
3571 drm_dp_mst_topology_put_mstb(mstb);
3575 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
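/*
 * Example (illustrative sketch): a driver would typically call the function
 * above from its long-pulse HPD handling, once it has probed DP_MSTM_CAP.
 * The my_dp container and its is_mst bookkeeping are hypothetical.
 */
struct my_dp {
	struct drm_dp_mst_topology_mgr mst_mgr;
	bool is_mst;
};

static void my_dp_update_mst_state(struct my_dp *dp, bool mst_capable)
{
	if (mst_capable == dp->is_mst)
		return;

	if (!drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, mst_capable))
		dp->is_mst = mst_capable;
}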
3578 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3580 struct drm_dp_mst_port *port;
3582 /* The link address will need to be re-sent on resume */
3583 mstb->link_address_sent = false;
3585 list_for_each_entry(port, &mstb->ports, next)
3587 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3591 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3592 * @mgr: manager to suspend
3594 * This function tells the MST device that we can't handle UP messages
3595 * anymore. This should stop it from sending any since we are suspended.
3597 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3599 mutex_lock(&mgr->lock);
3600 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3601 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3602 mutex_unlock(&mgr->lock);
3603 flush_work(&mgr->up_req_work);
3604 flush_work(&mgr->work);
3605 flush_work(&mgr->delayed_destroy_work);
3607 mutex_lock(&mgr->lock);
3608 if (mgr->mst_state && mgr->mst_primary)
3609 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3610 mutex_unlock(&mgr->lock);
3612 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3615 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3616 * @mgr: manager to resume
3617 * @sync: whether or not to perform topology reprobing synchronously
3619 * This will fetch DPCD and see if the device is still there,
3620 * if it is, it will rewrite the MSTM control bits, and return.
3622 * If the device fails this check, -1 is returned and the driver should do
3623 * a full MST reprobe, in case we were undocked.
3625 * During system resume (where it is assumed that the driver will be calling
3626 * drm_atomic_helper_resume()) this function should be called beforehand with
3627 * @sync set to true. In contexts like runtime resume where the driver is not
3628 * expected to be calling drm_atomic_helper_resume(), this function should be
3629 * called with @sync set to false in order to avoid deadlocking.
3631 * Returns: -1 if the MST topology was removed while we were suspended, 0 otherwise.
3634 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3640 mutex_lock(&mgr->lock);
3641 if (!mgr->mst_primary)
3644 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3645 DP_RECEIVER_CAP_SIZE);
3646 if (ret != DP_RECEIVER_CAP_SIZE) {
3647 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3651 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3654 DP_UPSTREAM_IS_SRC);
3656 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3660 /* Some hubs forget their GUIDs after they resume */
3661 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3663 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3667 ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3669 DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3674 * For the final step of resuming the topology, we need to bring the
3675 * state of our in-memory topology back into sync with reality. So,
3676 * restart the probing process as if we're probing a new hub
3678 queue_work(system_long_wq, &mgr->work);
3679 mutex_unlock(&mgr->lock);
3682 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3683 flush_work(&mgr->work);
3689 mutex_unlock(&mgr->lock);
3692 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
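/*
 * Example (illustrative sketch): pairing the suspend/resume helpers above
 * across system sleep, with @sync = true since a driver doing this would be
 * calling drm_atomic_helper_resume() afterwards. Reuses the hypothetical
 * my_dp container from the set_mst sketch above.
 */
static void my_dp_mst_suspend(struct my_dp *dp)
{
	if (dp->is_mst)
		drm_dp_mst_topology_mgr_suspend(&dp->mst_mgr);
}

static void my_dp_mst_resume(struct my_dp *dp)
{
	if (!dp->is_mst)
		return;

	/* a negative return means the topology vanished while suspended */
	if (drm_dp_mst_topology_mgr_resume(&dp->mst_mgr, true) < 0)
		my_dp_update_mst_state(dp, false);
}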
3694 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3695 struct drm_dp_mst_branch **mstb, int *seqno)
3699 int replylen, curreply;
3702 struct drm_dp_sideband_msg_hdr hdr;
3703 struct drm_dp_sideband_msg_rx *msg;
3704 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3705 DP_SIDEBAND_MSG_DOWN_REP_BASE;
3711 len = min(mgr->max_dpcd_transaction_bytes, 16);
3712 ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
3714 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3718 ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
3720 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3721 1, replyblock, len, false);
3722 DRM_DEBUG_KMS("ERROR: failed header\n");
3729 msg = &mgr->up_req_recv;
3731 /* Caller is responsible for giving back this reference */
3732 *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3734 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3738 msg = &(*mstb)->down_rep_recv[hdr.seqno];
3741 if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3742 DRM_DEBUG_KMS("sideband msg set header failed %d\n",
3747 replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3748 ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
3750 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3754 replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
3756 while (replylen > 0) {
3757 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3758 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3761 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3766 ret = drm_dp_sideband_append_payload(msg, replyblock, len);
3768 DRM_DEBUG_KMS("failed to build sideband msg\n");
3778 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3780 struct drm_dp_sideband_msg_tx *txmsg;
3781 struct drm_dp_mst_branch *mstb = NULL;
3782 struct drm_dp_sideband_msg_rx *msg = NULL;
3785 if (!drm_dp_get_one_sb_msg(mgr, false, &mstb, &seqno))
3786 goto out_clear_reply;
3788 msg = &mstb->down_rep_recv[seqno];
3790 /* Multi-packet message transmission, don't clear the reply */
3791 if (!msg->have_eomt)
3794 /* find the message */
3795 mutex_lock(&mgr->qlock);
3796 txmsg = mstb->tx_slots[seqno];
3797 /* remove from slots */
3798 mutex_unlock(&mgr->qlock);
3801 struct drm_dp_sideband_msg_hdr *hdr;
3802 hdr = &msg->initial_hdr;
3803 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3804 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3806 goto out_clear_reply;
3809 drm_dp_sideband_parse_reply(msg, &txmsg->reply);
3811 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3812 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3813 txmsg->reply.req_type,
3814 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3815 txmsg->reply.u.nak.reason,
3816 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3817 txmsg->reply.u.nak.nak_data);
3820 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3821 drm_dp_mst_topology_put_mstb(mstb);
3823 mutex_lock(&mgr->qlock);
3824 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3825 mstb->tx_slots[seqno] = NULL;
3826 mutex_unlock(&mgr->qlock);
3828 wake_up_all(&mgr->tx_waitq);
3834 memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
3837 drm_dp_mst_topology_put_mstb(mstb);
3843 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3844 struct drm_dp_pending_up_req *up_req)
3846 struct drm_dp_mst_branch *mstb = NULL;
3847 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3848 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3849 bool hotplug = false;
3851 if (hdr->broadcast) {
3852 const u8 *guid = NULL;
3854 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3855 guid = msg->u.conn_stat.guid;
3856 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3857 guid = msg->u.resource_stat.guid;
3860 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3862 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3866 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3871 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3872 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3873 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3877 drm_dp_mst_topology_put_mstb(mstb);
3881 static void drm_dp_mst_up_req_work(struct work_struct *work)
3883 struct drm_dp_mst_topology_mgr *mgr =
3884 container_of(work, struct drm_dp_mst_topology_mgr,
3886 struct drm_dp_pending_up_req *up_req;
3887 bool send_hotplug = false;
3889 mutex_lock(&mgr->probe_lock);
3891 mutex_lock(&mgr->up_req_lock);
3892 up_req = list_first_entry_or_null(&mgr->up_req_list,
3893 struct drm_dp_pending_up_req,
3896 list_del(&up_req->next);
3897 mutex_unlock(&mgr->up_req_lock);
3902 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3905 mutex_unlock(&mgr->probe_lock);
3908 drm_kms_helper_hotplug_event(mgr->dev);
3911 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3913 struct drm_dp_pending_up_req *up_req;
3916 if (!drm_dp_get_one_sb_msg(mgr, true, NULL, &seqno))
3919 if (!mgr->up_req_recv.have_eomt)
3922 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3924 DRM_ERROR("Not enough memory to process MST up req\n");
3927 INIT_LIST_HEAD(&up_req->next);
3929 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3931 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3932 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3933 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3934 up_req->msg.req_type);
3939 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3942 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3943 const struct drm_dp_connection_status_notify *conn_stat =
3944 &up_req->msg.u.conn_stat;
3946 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3947 conn_stat->port_number,
3948 conn_stat->legacy_device_plug_status,
3949 conn_stat->displayport_device_plug_status,
3950 conn_stat->message_capability_status,
3951 conn_stat->input_port,
3952 conn_stat->peer_device_type);
3953 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3954 const struct drm_dp_resource_status_notify *res_stat =
3955 &up_req->msg.u.resource_stat;
3957 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3958 res_stat->port_number,
3959 res_stat->available_pbn);
3962 up_req->hdr = mgr->up_req_recv.initial_hdr;
3963 mutex_lock(&mgr->up_req_lock);
3964 list_add_tail(&up_req->next, &mgr->up_req_list);
3965 mutex_unlock(&mgr->up_req_lock);
3966 queue_work(system_long_wq, &mgr->up_req_work);
3969 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3974 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3975 * @mgr: manager to notify irq for.
3976 * @esi: 4 bytes from SINK_COUNT_ESI
3977 * @handled: whether the hpd interrupt was consumed or not
3979 * This should be called from the driver when it detects a short IRQ,
3980 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3981 * topology manager will process the sideband messages received as a result of this.
3984 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3991 if (sc != mgr->sink_count) {
3992 mgr->sink_count = sc;
3996 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3997 ret = drm_dp_mst_handle_down_rep(mgr);
4001 if (esi[1] & DP_UP_REQ_MSG_RDY) {
4002 ret |= drm_dp_mst_handle_up_req(mgr);
4006 drm_dp_mst_kick_tx(mgr);
4009 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
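/*
 * Example (illustrative sketch): a short-pulse HPD handler feeding ESI bytes
 * into the helper above and acking whatever was serviced, loosely modelled on
 * what existing drivers do. Real handlers re-read the ESI in a loop until no
 * more events are pending; that loop is elided here.
 */
static void my_dp_mst_check_irq(struct drm_dp_aux *aux,
				struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[DP_DPRX_ESI_LEN] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi,
			     DP_DPRX_ESI_LEN) != DP_DPRX_ESI_LEN)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled) {
		/* ack the DOWN_REP/UP_REQ bits in DEVICE_SERVICE_IRQ_VECTOR_ESI0 */
		drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, esi[1]);
	}
}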
4012 * drm_dp_mst_detect_port() - get connection status for an MST port
4013 * @connector: DRM connector for this port
4014 * @ctx: The acquisition context to use for grabbing locks
4015 * @mgr: manager for this port
4016 * @port: pointer to a port
4018 * This returns the current connection state for a port.
4021 drm_dp_mst_detect_port(struct drm_connector *connector,
4022 struct drm_modeset_acquire_ctx *ctx,
4023 struct drm_dp_mst_topology_mgr *mgr,
4024 struct drm_dp_mst_port *port)
4028 /* we need to search for the port in the mgr in case it's gone */
4029 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4031 return connector_status_disconnected;
4033 ret = drm_modeset_lock(&mgr->base.lock, ctx);
4037 ret = connector_status_disconnected;
4042 switch (port->pdt) {
4043 case DP_PEER_DEVICE_NONE:
4044 case DP_PEER_DEVICE_MST_BRANCHING:
4046 ret = connector_status_connected;
4049 case DP_PEER_DEVICE_SST_SINK:
4050 ret = connector_status_connected;
4051 /* for logical ports - cache the EDID */
4052 if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid) {
4053 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4056 case DP_PEER_DEVICE_DP_LEGACY_CONV:
4058 ret = connector_status_connected;
4062 drm_dp_mst_topology_put_port(port);
4065 EXPORT_SYMBOL(drm_dp_mst_detect_port);
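/*
 * Example (illustrative sketch): the function above is meant to back a
 * connector's &drm_connector_helper_funcs.detect_ctx hook; it may return
 * -EDEADLK, which the probe helpers know how to handle. Container plumbing
 * as in the hypothetical my_mst_connector sketch earlier in this file.
 */
static int my_mst_connector_detect_ctx(struct drm_connector *connector,
				       struct drm_modeset_acquire_ctx *ctx,
				       bool force)
{
	struct my_mst_connector *conn = to_my_mst_connector(connector);

	return drm_dp_mst_detect_port(connector, ctx, conn->port->mgr,
				      conn->port);
}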
4068 * drm_dp_mst_get_edid() - get EDID for an MST port
4069 * @connector: toplevel connector to get EDID for
4070 * @mgr: manager for this port
4071 * @port: unverified pointer to a port.
4073 * This returns an EDID for the port connected to a connector.
4074 * It validates the pointer still exists so the caller doesn't require a reference.
4077 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4079 struct edid *edid = NULL;
4081 /* we need to search for the port in the mgr in case it's gone */
4082 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4086 if (port->cached_edid)
4087 edid = drm_edid_duplicate(port->cached_edid);
4089 edid = drm_get_edid(connector, &port->aux.ddc);
4091 port->has_audio = drm_detect_monitor_audio(edid);
4092 drm_dp_mst_topology_put_port(port);
4095 EXPORT_SYMBOL(drm_dp_mst_get_edid);
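/*
 * Example (illustrative sketch): a minimal
 * &drm_connector_helper_funcs.get_modes implementation on top of the helper
 * above; container plumbing as in the earlier hypothetical sketch.
 */
static int my_mst_connector_get_modes(struct drm_connector *connector)
{
	struct my_mst_connector *conn = to_my_mst_connector(connector);
	struct edid *edid;
	int count = 0;

	edid = drm_dp_mst_get_edid(connector, conn->port->mgr, conn->port);
	drm_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return count;
}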
4098 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4099 * @mgr: manager to use
4100 * @pbn: payload bandwidth to convert into slots.
4102 * Calculate the number of VCPI slots that will be required for the given PBN
4103 * value. This function is deprecated, and should not be used in atomic drivers.
4107 * The total slots required for this port, or a negative error code.
4109 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4114 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4116 /* max. time slots - one slot for MTP header */
4121 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4123 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4124 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4128 /* max. time slots - one slot for MTP header */
4133 vcpi->aligned_pbn = slots * mgr->pbn_div;
4134 vcpi->num_slots = slots;
4136 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4143 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4144 * @state: global atomic state
4145 * @mgr: MST topology manager for the port
4146 * @port: port to find vcpi slots for
4147 * @pbn: bandwidth required for the mode in PBN
4148 * @pbn_div: divider for DSC mode that takes FEC into account
4150 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4151 * may have had. Any atomic drivers which support MST must call this function
4152 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4153 * current VCPI allocation for the new state, but only when
4154 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4155 * to ensure compatibility with userspace applications that still use the
4156 * legacy modesetting UAPI.
4158 * Allocations set by this function are not checked against the bandwidth
4159 * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4161 * Additionally, it is OK to call this function multiple times on the same
4162 * @port as needed. It is not OK, however, to call this function and
4163 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4166 * drm_dp_atomic_release_vcpi_slots()
4167 * drm_dp_mst_atomic_check()
4170 * Total slots in the atomic state assigned for this port, or a negative error
4171 * code if the port no longer exists
4173 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4174 struct drm_dp_mst_topology_mgr *mgr,
4175 struct drm_dp_mst_port *port, int pbn,
4178 struct drm_dp_mst_topology_state *topology_state;
4179 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4180 int prev_slots, prev_bw, req_slots;
4182 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4183 if (IS_ERR(topology_state))
4184 return PTR_ERR(topology_state);
4186 /* Find the current allocation for this port, if any */
4187 list_for_each_entry(pos, &topology_state->vcpis, next) {
4188 if (pos->port == port) {
4190 prev_slots = vcpi->vcpi;
4191 prev_bw = vcpi->pbn;
4194 * This should never happen, unless the driver tries
4195 * releasing and allocating the same VCPI allocation, which is an error
4198 if (WARN_ON(!prev_slots)) {
4199 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4213 pbn_div = mgr->pbn_div;
4215 req_slots = DIV_ROUND_UP(pbn, pbn_div);
4217 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4218 port->connector->base.id, port->connector->name,
4219 port, prev_slots, req_slots);
4220 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4221 port->connector->base.id, port->connector->name,
4222 port, prev_bw, pbn);
4224 /* Add the new allocation to the state */
4226 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4230 drm_dp_mst_get_port_malloc(port);
4232 list_add(&vcpi->next, &topology_state->vcpis);
4234 vcpi->vcpi = req_slots;
4239 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
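/*
 * Example (illustrative sketch): the atomic_check-time half of MST bandwidth
 * management. The encoder hook shape and the assumed 24 bpp are driver
 * specifics; passing 0 for @pbn_div selects mgr->pbn_div. Container plumbing
 * as in the earlier hypothetical sketch.
 */
static int my_mst_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct my_mst_connector *conn =
		to_my_mst_connector(conn_state->connector);
	int pbn, slots;

	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock,
				   24 /* assumed bpp */, false);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
					      conn->port->mgr, conn->port,
					      pbn, 0);
	return slots < 0 ? slots : 0;
}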
4242 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4243 * @state: global atomic state
4244 * @mgr: MST topology manager for the port
4245 * @port: The port to release the VCPI slots from
4247 * Releases any VCPI slots that have been allocated to a port in the atomic
4248 * state. Any atomic drivers which support MST must call this function in
4249 * their &drm_connector_helper_funcs.atomic_check() callback when the
4250 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4251 * removed) when it had VCPI allocated in the previous atomic state.
4253 * It is OK to call this even if @port has been removed from the system.
4254 * Additionally, it is OK to call this function multiple times on the same
4255 * @port as needed. It is not OK, however, to call this function and
4256 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4260 * drm_dp_atomic_find_vcpi_slots()
4261 * drm_dp_mst_atomic_check()
4264 * 0 if all slots for this port were added back to
4265 * &drm_dp_mst_topology_state.avail_slots or negative error code
4267 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4268 struct drm_dp_mst_topology_mgr *mgr,
4269 struct drm_dp_mst_port *port)
4271 struct drm_dp_mst_topology_state *topology_state;
4272 struct drm_dp_vcpi_allocation *pos;
4275 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4276 if (IS_ERR(topology_state))
4277 return PTR_ERR(topology_state);
4279 list_for_each_entry(pos, &topology_state->vcpis, next) {
4280 if (pos->port == port) {
4285 if (WARN_ON(!found)) {
4286 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4287 port, &topology_state->base);
4291 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4293 drm_dp_mst_put_port_malloc(port);
4299 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
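/*
 * Example (illustrative sketch): the matching release path, from a
 * connector's &drm_connector_helper_funcs.atomic_check when the connector is
 * being taken off its CRTC. A complete driver also releases on CRTC changes;
 * that case is elided here.
 */
static int my_mst_connector_atomic_check(struct drm_connector *connector,
					 struct drm_atomic_state *state)
{
	struct my_mst_connector *conn = to_my_mst_connector(connector);
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);

	if (old_state->crtc && !new_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state,
							conn->port->mgr,
							conn->port);
	return 0;
}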
4302 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4303 * @mgr: manager for this port
4304 * @port: port to allocate a virtual channel for.
4305 * @pbn: payload bandwidth number to request
4306 * @slots: returned number of slots for this PBN.
4308 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4309 struct drm_dp_mst_port *port, int pbn, int slots)
4313 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4320 if (port->vcpi.vcpi > 0) {
4321 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4322 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4323 if (pbn == port->vcpi.pbn) {
4324 drm_dp_mst_topology_put_port(port);
4329 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4331 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4332 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4335 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4336 pbn, port->vcpi.num_slots);
4338 /* Keep port allocated until its payload has been removed */
4339 drm_dp_mst_get_port_malloc(port);
4340 drm_dp_mst_topology_put_port(port);
4345 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4347 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4350 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4354 slots = port->vcpi.num_slots;
4355 drm_dp_mst_topology_put_port(port);
4358 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4361 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4362 * @mgr: manager for this port
4363 * @port: unverified pointer to a port.
4365 * This just resets the number of slots for the port's VCPI for later programming.
4367 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4370 * A port with VCPI will remain allocated until its VCPI is
4371 * released, so no verified reference is needed
4374 port->vcpi.num_slots = 0;
4376 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4379 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4380 * @mgr: manager for this port
4381 * @port: port to deallocate vcpi for
4383 * This can be called unconditionally, regardless of whether
4384 * drm_dp_mst_allocate_vcpi() succeeded or not.
4386 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4387 struct drm_dp_mst_port *port)
4389 if (!port->vcpi.vcpi)
4392 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4393 port->vcpi.num_slots = 0;
4395 port->vcpi.aligned_pbn = 0;
4396 port->vcpi.vcpi = 0;
4397 drm_dp_mst_put_port_malloc(port);
4399 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4401 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4402 int id, struct drm_dp_payload *payload)
4404 u8 payload_alloc[3], status;
4408 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4409 DP_PAYLOAD_TABLE_UPDATED);
4411 payload_alloc[0] = id;
4412 payload_alloc[1] = payload->start_slot;
4413 payload_alloc[2] = payload->num_slots;
4415 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4417 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4422 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4424 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4428 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4431 usleep_range(10000, 20000);
4434 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4443 static int do_get_act_status(struct drm_dp_aux *aux)
4448 ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4456 * drm_dp_check_act_status() - Polls for ACT handled status.
4457 * @mgr: manager to use
4459 * Tries waiting for the MST hub to finish updating its payload table by
4460 * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really are that slow sometimes).
4464 * 0 if the ACT was handled in time, negative error code on failure.
4466 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4469 * There doesn't seem to be any recommended retry count or timeout in
4470 * the MST specification. Since some hubs have been observed to take
4471 * over 1 second to update their payload allocations under certain
4472 * conditions, we use a rather large timeout value.
4474 const int timeout_ms = 3000;
4477 ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
4478 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
4479 200, timeout_ms * USEC_PER_MSEC);
4480 if (ret < 0 && status >= 0) {
4481 DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
4482 timeout_ms, status);
4484 } else if (status < 0) {
4486 * Failure here isn't unexpected - the hub may have
4487 * just been unplugged
4489 DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
4496 EXPORT_SYMBOL(drm_dp_check_act_status);
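/*
 * Where this fits in a (hypothetical) driver's enable sequence; the payload
 * update helpers referenced here live elsewhere in this file:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... enable the stream so the hw sends the ACT trigger ...
 *	ret = drm_dp_check_act_status(mgr);
 *	if (ret < 0)
 *		return ret;	// hub never set DP_PAYLOAD_ACT_HANDLED
 *	drm_dp_update_payload_part2(mgr);
 */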
4499 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4500 * @clock: dot clock for the mode
4501 * @bpp: bpp for the mode.
4502 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4504 * This uses the formula in the spec to calculate the PBN value for a mode.
4506 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4509 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
 * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
 * common multiplier to render an integer PBN for all link rate/lane
 * count combinations
4514 * peak_kbps *= (1006/1000)
4515 * peak_kbps *= (64/54)
4516 * peak_kbps *= 8 convert to bytes
	 * If the bpp is in units of 1/16, further divide by 16. Put this
	 * factor in the numerator rather than the denominator to avoid
	 * integer overflow
	 */
	if (dsc)
		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
					8 * 54 * 1000 * 1000);
4527 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4528 8 * 54 * 1000 * 1000);
4530 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
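/*
 * Worked example: a 1920x1080@60 mode (148500 kHz dot clock) at 24 bpp
 * without DSC:
 *
 *	drm_dp_calc_pbn_mode(148500, 24, false)
 *	  = DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *	  = DIV_ROUND_UP(229464576000, 432000000)
 *	  = 532 PBN
 */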
/* we want to kick the TX after we've acked the up/down IRQs. */
4533 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4535 queue_work(system_long_wq, &mgr->tx_work);
4538 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4539 struct drm_dp_mst_branch *mstb)
4541 struct drm_dp_mst_port *port;
4542 int tabs = mstb->lct;
4546 for (i = 0; i < tabs; i++)
4550 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4551 list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
4554 drm_dp_mst_dump_mstb(m, port->mstb);
4558 #define DP_PAYLOAD_TABLE_SIZE 64
4560 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4565 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4566 if (drm_dp_dpcd_read(mgr->aux,
4567 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4574 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4575 struct drm_dp_mst_port *port, char *name,
4578 struct edid *mst_edid;
4580 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4581 drm_edid_get_monitor_name(mst_edid, name, namelen);
 * drm_dp_mst_dump_topology() - dump topology to seq file.
4586 * @m: seq_file to dump output to
4587 * @mgr: manager to dump current topology for.
 * Helper to dump MST topology to a seq file for debugfs.
4591 void drm_dp_mst_dump_topology(struct seq_file *m,
4592 struct drm_dp_mst_topology_mgr *mgr)
4595 struct drm_dp_mst_port *port;
4597 mutex_lock(&mgr->lock);
4598 if (mgr->mst_primary)
4599 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4602 mutex_unlock(&mgr->lock);
4604 mutex_lock(&mgr->payload_lock);
4605 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4608 for (i = 0; i < mgr->max_payloads; i++) {
4609 if (mgr->proposed_vcpis[i]) {
4612 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4613 fetch_monitor_name(mgr, port, name, sizeof(name));
4614 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4615 port->port_num, port->vcpi.vcpi,
4616 port->vcpi.num_slots,
4617 (*name != 0) ? name : "Unknown");
			seq_printf(m, "vcpi %d: unused\n", i);
4621 for (i = 0; i < mgr->max_payloads; i++) {
4622 seq_printf(m, "payload %d: %d, %d, %d\n",
4624 mgr->payloads[i].payload_state,
4625 mgr->payloads[i].start_slot,
4626 mgr->payloads[i].num_slots);
4630 mutex_unlock(&mgr->payload_lock);
4632 mutex_lock(&mgr->lock);
4633 if (mgr->mst_primary) {
4634 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4637 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4639 seq_printf(m, "dpcd read failed\n");
4642 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4644 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4646 seq_printf(m, "faux/mst read failed\n");
4649 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4651 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4653 seq_printf(m, "mst ctrl read failed\n");
4656 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4658 /* dump the standard OUI branch header */
4659 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4661 seq_printf(m, "branch oui read failed\n");
4664 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4666 for (i = 0x3; i < 0x8 && buf[i]; i++)
4667 seq_printf(m, "%c", buf[i]);
4668 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4669 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4670 if (dump_dp_payload_table(mgr, buf))
4671 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4675 mutex_unlock(&mgr->lock);
4678 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
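/*
 * Debugfs hookup sketch (hypothetical driver code; how the mgr pointer is
 * recovered from the seq_file is illustrative):
 *
 *	static int dp_mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 */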
4680 static void drm_dp_tx_work(struct work_struct *work)
4682 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4684 mutex_lock(&mgr->qlock);
4685 if (!list_empty(&mgr->tx_msg_downq))
4686 process_single_down_tx_qlock(mgr);
4687 mutex_unlock(&mgr->qlock);
4691 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4693 if (port->connector) {
4694 drm_connector_unregister(port->connector);
4695 drm_connector_put(port->connector);
4698 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4699 drm_dp_mst_put_port_malloc(port);
4703 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4705 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4706 struct drm_dp_mst_port *port, *tmp;
4707 bool wake_tx = false;
4709 mutex_lock(&mgr->lock);
4710 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
4711 list_del(&port->next);
4712 drm_dp_mst_topology_put_port(port);
4714 mutex_unlock(&mgr->lock);
	/* drop any tx slot msgs */
4717 mutex_lock(&mstb->mgr->qlock);
4718 if (mstb->tx_slots[0]) {
4719 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4720 mstb->tx_slots[0] = NULL;
4723 if (mstb->tx_slots[1]) {
4724 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4725 mstb->tx_slots[1] = NULL;
4728 mutex_unlock(&mstb->mgr->qlock);
4731 wake_up_all(&mstb->mgr->tx_waitq);
4733 drm_dp_mst_put_mstb_malloc(mstb);
4736 static void drm_dp_delayed_destroy_work(struct work_struct *work)
4738 struct drm_dp_mst_topology_mgr *mgr =
4739 container_of(work, struct drm_dp_mst_topology_mgr,
4740 delayed_destroy_work);
4741 bool send_hotplug = false, go_again;
4744 * Not a regular list traverse as we have to drop the destroy
4745 * connector lock before destroying the mstb/port, to avoid AB->BA
4746 * ordering between this lock and the config mutex.
4752 struct drm_dp_mst_branch *mstb;
4754 mutex_lock(&mgr->delayed_destroy_lock);
4755 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4756 struct drm_dp_mst_branch,
4759 list_del(&mstb->destroy_next);
4760 mutex_unlock(&mgr->delayed_destroy_lock);
4765 drm_dp_delayed_destroy_mstb(mstb);
4770 struct drm_dp_mst_port *port;
4772 mutex_lock(&mgr->delayed_destroy_lock);
4773 port = list_first_entry_or_null(&mgr->destroy_port_list,
4774 struct drm_dp_mst_port,
4777 list_del(&port->next);
4778 mutex_unlock(&mgr->delayed_destroy_lock);
4783 drm_dp_delayed_destroy_port(port);
4784 send_hotplug = true;
4790 drm_kms_helper_hotplug_event(mgr->dev);
4793 static struct drm_private_state *
4794 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4796 struct drm_dp_mst_topology_state *state, *old_state =
4797 to_dp_mst_topology_state(obj->state);
4798 struct drm_dp_vcpi_allocation *pos, *vcpi;
4800 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4804 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4806 INIT_LIST_HEAD(&state->vcpis);
4808 list_for_each_entry(pos, &old_state->vcpis, next) {
4809 /* Prune leftover freed VCPI allocations */
4813 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4817 drm_dp_mst_get_port_malloc(vcpi->port);
4818 list_add(&vcpi->next, &state->vcpis);
4821 return &state->base;
4824 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4825 drm_dp_mst_put_port_malloc(pos->port);
4833 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4834 struct drm_private_state *state)
4836 struct drm_dp_mst_topology_state *mst_state =
4837 to_dp_mst_topology_state(state);
4838 struct drm_dp_vcpi_allocation *pos, *tmp;
4840 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4841 /* We only keep references to ports with non-zero VCPIs */
4843 drm_dp_mst_put_port_malloc(pos->port);
4850 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
4851 struct drm_dp_mst_branch *branch)
4853 while (port->parent) {
4854 if (port->parent == branch)
4857 if (port->parent->port_parent)
4858 port = port->parent->port_parent;
4866 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4867 struct drm_dp_mst_topology_state *state);
4870 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
4871 struct drm_dp_mst_topology_state *state)
4873 struct drm_dp_vcpi_allocation *vcpi;
4874 struct drm_dp_mst_port *port;
4875 int pbn_used = 0, ret;
4878 /* Check that we have at least one port in our state that's downstream
4879 * of this branch, otherwise we can skip this branch
4881 list_for_each_entry(vcpi, &state->vcpis, next) {
4883 !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
4892 if (mstb->port_parent)
4893 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
4894 mstb->port_parent->parent, mstb->port_parent,
4897 DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
4900 list_for_each_entry(port, &mstb->ports, next) {
4901 ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
4912 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
4913 struct drm_dp_mst_topology_state *state)
4915 struct drm_dp_vcpi_allocation *vcpi;
4918 if (port->pdt == DP_PEER_DEVICE_NONE)
4921 if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
4924 list_for_each_entry(vcpi, &state->vcpis, next) {
4925 if (vcpi->port != port)
4936 /* This should never happen, as it means we tried to
4937 * set a mode before querying the full_pbn
4939 if (WARN_ON(!port->full_pbn))
4942 pbn_used = vcpi->pbn;
4944 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
4950 if (pbn_used > port->full_pbn) {
4951 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
4952 port->parent, port, pbn_used,
4957 DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
4958 port->parent, port, pbn_used, port->full_pbn);
4964 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
4965 struct drm_dp_mst_topology_state *mst_state)
4967 struct drm_dp_vcpi_allocation *vcpi;
4968 int avail_slots = 63, payload_count = 0;
4970 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK, even if the port is gone */
4973 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4978 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4979 vcpi->port, vcpi->vcpi);
4981 avail_slots -= vcpi->vcpi;
4982 if (avail_slots < 0) {
4983 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4984 vcpi->port, mst_state,
4985 avail_slots + vcpi->vcpi);
4989 if (++payload_count > mgr->max_payloads) {
4990 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4991 mgr, mst_state, mgr->max_payloads);
4995 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4996 mgr, mst_state, avail_slots,
5003 * drm_dp_mst_add_affected_dsc_crtcs
5004 * @state: Pointer to the new struct drm_dp_mst_topology_state
5005 * @mgr: MST topology manager
 * Whenever there is a change in the MST topology, the DSC configuration
 * has to be recalculated, so we need to trigger a modeset on all
 * affected CRTCs in that topology.
5013 * drm_dp_mst_atomic_enable_dsc()
5015 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5017 struct drm_dp_mst_topology_state *mst_state;
5018 struct drm_dp_vcpi_allocation *pos;
5019 struct drm_connector *connector;
5020 struct drm_connector_state *conn_state;
5021 struct drm_crtc *crtc;
5022 struct drm_crtc_state *crtc_state;
5024 mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5026 if (IS_ERR(mst_state))
5029 list_for_each_entry(pos, &mst_state->vcpis, next) {
5031 connector = pos->port->connector;
5036 conn_state = drm_atomic_get_connector_state(state, connector);
5038 if (IS_ERR(conn_state))
5039 return PTR_ERR(conn_state);
5041 crtc = conn_state->crtc;
5046 if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5049 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5051 if (IS_ERR(crtc_state))
5052 return PTR_ERR(crtc_state);
5054 DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5057 crtc_state->mode_changed = true;
5061 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
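/*
 * Call-site sketch (hypothetical driver atomic_check; the trigger condition
 * is illustrative): when a connector on the topology is being enabled or
 * torn down, force the other DSC streams to be recomputed:
 *
 *	if (crtc_state->connectors_changed || crtc_state->mode_changed) {
 *		ret = drm_dp_mst_add_affected_dsc_crtcs(state, &dp->mst_mgr);
 *		if (ret)
 *			return ret;
 *	}
 */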
5064 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5065 * @state: Pointer to the new drm_atomic_state
5066 * @port: Pointer to the affected MST Port
 * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
 * @pbn_div: Divider to calculate the correct number of PBN per slot
5069 * @enable: Boolean flag to enable or disable DSC on the port
 * This function enables or disables DSC on the given port by
 * recalculating its VCPI from the PBN provided, and sets the
 * dsc_enabled flag to keep track of which ports have DSC enabled.
5077 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5078 struct drm_dp_mst_port *port,
5079 int pbn, int pbn_div,
5082 struct drm_dp_mst_topology_state *mst_state;
5083 struct drm_dp_vcpi_allocation *pos;
5087 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5089 if (IS_ERR(mst_state))
5090 return PTR_ERR(mst_state);
5092 list_for_each_entry(pos, &mst_state->vcpis, next) {
5093 if (pos->port == port) {
5100 DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5105 if (pos->dsc_enabled == enable) {
5106 DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5107 port, enable, pos->vcpi);
5112 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5113 DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5119 pos->dsc_enabled = enable;
5123 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
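/*
 * Usage sketch (hypothetical driver DSC policy; need_dsc and dsc_bpp_x16
 * come from the driver's own link computation):
 *
 *	if (need_dsc)
 *		pbn = drm_dp_calc_pbn_mode(mode->clock, dsc_bpp_x16, true);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn,
 *					     mgr->pbn_div, need_dsc);
 *	if (slots < 0)
 *		return slots;
 */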
5125 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5126 * atomic update is valid
5127 * @state: Pointer to the new &struct drm_dp_mst_topology_state
5129 * Checks the given topology state for an atomic update to ensure that it's
5130 * valid. This includes checking whether there's enough bandwidth to support
5131 * the new VCPI allocations in the atomic update.
5133 * Any atomic drivers supporting DP MST must make sure to call this after
5134 * checking the rest of their state in their
5135 * &drm_mode_config_funcs.atomic_check() callback.
5138 * drm_dp_atomic_find_vcpi_slots()
5139 * drm_dp_atomic_release_vcpi_slots()
5143 * 0 if the new state is valid, negative error code otherwise.
5145 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5147 struct drm_dp_mst_topology_mgr *mgr;
5148 struct drm_dp_mst_topology_state *mst_state;
5151 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5152 if (!mgr->mst_state)
5155 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5159 mutex_lock(&mgr->lock);
5160 ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5162 mutex_unlock(&mgr->lock);
5171 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
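/*
 * Call-site sketch for a (hypothetical) driver's
 * &drm_mode_config_funcs.atomic_check implementation:
 *
 *	static int driver_atomic_check(struct drm_device *dev,
 *				       struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */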
5173 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5174 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
5175 .atomic_destroy_state = drm_dp_mst_destroy_state,
5177 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
 * drm_atomic_get_mst_topology_state() - get MST topology state
5182 * @state: global atomic state
5183 * @mgr: MST topology manager, also the private object in this case
 * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
 * atomic state vtable so that the private object state returned is that of an
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so warn if we don't hold the
 * connection_mutex.
5192 * The MST topology state or error pointer.
5194 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5195 struct drm_dp_mst_topology_mgr *mgr)
5197 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5199 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
5202 * drm_dp_mst_topology_mgr_init - initialise a topology manager
5203 * @mgr: manager struct to initialise
5204 * @dev: device providing this structure - for i2c addition.
5205 * @aux: DP helper aux channel to talk to this device
5206 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5207 * @max_payloads: maximum number of payloads this GPU can source
5208 * @conn_base_id: the connector object ID the MST device is connected to.
5210 * Return 0 for success, or negative error code on failure
5212 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5213 struct drm_device *dev, struct drm_dp_aux *aux,
5214 int max_dpcd_transaction_bytes,
5215 int max_payloads, int conn_base_id)
5217 struct drm_dp_mst_topology_state *mst_state;
5219 mutex_init(&mgr->lock);
5220 mutex_init(&mgr->qlock);
5221 mutex_init(&mgr->payload_lock);
5222 mutex_init(&mgr->delayed_destroy_lock);
5223 mutex_init(&mgr->up_req_lock);
5224 mutex_init(&mgr->probe_lock);
5225 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5226 mutex_init(&mgr->topology_ref_history_lock);
5228 INIT_LIST_HEAD(&mgr->tx_msg_downq);
5229 INIT_LIST_HEAD(&mgr->destroy_port_list);
5230 INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5231 INIT_LIST_HEAD(&mgr->up_req_list);
5232 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5233 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5234 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5235 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5236 init_waitqueue_head(&mgr->tx_waitq);
5239 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5240 mgr->max_payloads = max_payloads;
5241 mgr->conn_base_id = conn_base_id;
5242 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5243 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5245 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5248 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5249 if (!mgr->proposed_vcpis)
5251 set_bit(0, &mgr->payload_mask);
5253 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5254 if (mst_state == NULL)
5257 mst_state->mgr = mgr;
5258 INIT_LIST_HEAD(&mst_state->vcpis);
5260 drm_atomic_private_obj_init(dev, &mgr->base,
5262 &drm_dp_mst_topology_state_funcs);
5266 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
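/*
 * Init sketch (hypothetical driver code; the 16-byte DPCD transaction limit
 * and 4-payload cap are illustrative values a driver might pick):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, &dp->aux,
 *					   16, 4, connector->base.id);
 *	if (ret)
 *		return ret;
 */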
5269 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5270 * @mgr: manager to destroy
5272 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5274 drm_dp_mst_topology_mgr_set_mst(mgr, false);
5275 flush_work(&mgr->work);
5276 cancel_work_sync(&mgr->delayed_destroy_work);
5277 mutex_lock(&mgr->payload_lock);
5278 kfree(mgr->payloads);
5279 mgr->payloads = NULL;
5280 kfree(mgr->proposed_vcpis);
5281 mgr->proposed_vcpis = NULL;
5282 mutex_unlock(&mgr->payload_lock);
5285 drm_atomic_private_obj_fini(&mgr->base);
5288 mutex_destroy(&mgr->delayed_destroy_lock);
5289 mutex_destroy(&mgr->payload_lock);
5290 mutex_destroy(&mgr->qlock);
5291 mutex_destroy(&mgr->lock);
5292 mutex_destroy(&mgr->up_req_lock);
5293 mutex_destroy(&mgr->probe_lock);
5294 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5295 mutex_destroy(&mgr->topology_ref_history_lock);
5298 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
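/*
 * The sideband DP_REMOTE_I2C_READ request can only express transfers of the
 * form "zero or more writes followed by exactly one read", so that is all
 * remote_i2c_read_ok() below accepts. A classic DDC EDID fetch fits the
 * mold (the addresses and lengths here are just the usual DDC values):
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 */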
5300 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5304 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5307 for (i = 0; i < num - 1; i++) {
5308 if (msgs[i].flags & I2C_M_RD ||
5313 return msgs[num - 1].flags & I2C_M_RD &&
5314 msgs[num - 1].len <= 0xff;
5318 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
5321 struct drm_dp_aux *aux = adapter->algo_data;
5322 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
5323 struct drm_dp_mst_branch *mstb;
5324 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5326 struct drm_dp_sideband_msg_req_body msg;
5327 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5330 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5334 if (!remote_i2c_read_ok(msgs, num)) {
5335 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5340 memset(&msg, 0, sizeof(msg));
5341 msg.req_type = DP_REMOTE_I2C_READ;
5342 msg.u.i2c_read.num_transactions = num - 1;
5343 msg.u.i2c_read.port_number = port->port_num;
5344 for (i = 0; i < num - 1; i++) {
5345 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5346 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5347 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5348 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5350 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5351 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5353 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5360 drm_dp_encode_sideband_req(&msg, txmsg);
5362 drm_dp_queue_down_tx(mgr, txmsg);
5364 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5367 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5371 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5375 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5380 drm_dp_mst_topology_put_mstb(mstb);
5384 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5386 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5387 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5388 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5389 I2C_FUNC_10BIT_ADDR;
5392 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5393 .functionality = drm_dp_mst_i2c_functionality,
5394 .master_xfer = drm_dp_mst_i2c_xfer,
5398 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5399 * @aux: DisplayPort AUX channel
5401 * Returns 0 on success or a negative error code on failure.
5403 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5405 aux->ddc.algo = &drm_dp_mst_i2c_algo;
5406 aux->ddc.algo_data = aux;
5407 aux->ddc.retries = 3;
5409 aux->ddc.class = I2C_CLASS_DDC;
5410 aux->ddc.owner = THIS_MODULE;
5411 aux->ddc.dev.parent = aux->dev;
5412 aux->ddc.dev.of_node = aux->dev->of_node;
5414 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5415 sizeof(aux->ddc.name));
5417 return i2c_add_adapter(&aux->ddc);
5421 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5422 * @aux: DisplayPort AUX channel
5424 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5426 i2c_del_adapter(&aux->ddc);
5430 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5431 * @port: The port to check
5433 * A single physical MST hub object can be represented in the topology
5434 * by multiple branches, with virtual ports between those branches.
 * As of DP1.4, an MST hub with internal (virtual) ports must expose
 * certain DPCD registers over those ports. See sections 2.6.1.1.1
 * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5440 * May acquire mgr->lock
5443 * true if the port is a virtual DP peer device, false otherwise
5445 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5447 struct drm_dp_mst_port *downstream_port;
5449 if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5452 /* Virtual DP Sink (Internal Display Panel) */
5453 if (port->port_num >= 8)
5456 /* DP-to-HDMI Protocol Converter */
5457 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5463 mutex_lock(&port->mgr->lock);
5464 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5466 port->mstb->num_ports == 2) {
5467 list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5468 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5469 !downstream_port->input) {
5470 mutex_unlock(&port->mgr->lock);
5475 mutex_unlock(&port->mgr->lock);
5481 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5482 * @port: The port to check. A leaf of the MST tree with an attached display.
5484 * Depending on the situation, DSC may be enabled via the endpoint aux,
5485 * the immediately upstream aux, or the connector's physical aux.
5487 * This is both the correct aux to read DSC_CAPABILITY and the
5488 * correct aux to write DSC_ENABLED.
5490 * This operation can be expensive (up to four aux reads), so
5491 * the caller should cache the return.
5494 * NULL if DSC cannot be enabled on this port, otherwise the aux device
5496 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5498 struct drm_dp_mst_port *immediate_upstream_port;
5499 struct drm_dp_mst_port *fec_port;
5500 struct drm_dp_desc desc = { 0 };
5507 if (port->parent->port_parent)
5508 immediate_upstream_port = port->parent->port_parent;
5510 immediate_upstream_port = NULL;
5512 fec_port = immediate_upstream_port;
5515 * Each physical link (i.e. not a virtual port) between the
5516 * output and the primary device must support FEC
5518 if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5519 !fec_port->fec_capable)
5522 fec_port = fec_port->parent->port_parent;
5525 /* DP-to-DP peer device */
5526 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5529 if (drm_dp_dpcd_read(&port->aux,
5530 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5532 if (drm_dp_dpcd_read(&port->aux,
5533 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5535 if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5536 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
		/* Endpoint decompression with DP-to-DP peer device */
5540 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5541 (endpoint_fec & DP_FEC_CAPABLE) &&
5542 (upstream_dsc & 0x2) /* DSC passthrough */)
5545 /* Virtual DPCD decompression with DP-to-DP peer device */
5546 return &immediate_upstream_port->aux;
5549 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5550 if (drm_dp_mst_is_virtual_dpcd(port))
5555 * Applies to ports for which:
5556 * - Physical aux has Synaptics OUI
5557 * - DPv1.4 or higher
5558 * - Port is on primary branch device
5559 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5561 if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5564 if (drm_dp_has_quirk(&desc, 0,
5565 DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5566 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5567 port->parent == port->mgr->mst_primary) {
5570 if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5571 &downstreamport, 1) < 0)
5574 if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5575 ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5576 != DP_DWN_STRM_PORT_TYPE_ANALOG))
5577 return port->mgr->aux;
	 * The check below verifies whether the MST sink connected to the GPU
	 * is capable of DSC - therefore the endpoint needs to be both DSC
	 * and FEC capable.
5586 if (drm_dp_dpcd_read(&port->aux,
5587 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5589 if (drm_dp_dpcd_read(&port->aux,
5590 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5592 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5593 (endpoint_fec & DP_FEC_CAPABLE))
5598 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
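/*
 * Caller sketch (hypothetical detect path; the dsc_aux field is the
 * driver's own cache, not part of this helper):
 *
 *	connector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	if (connector->dsc_aux)
 *		... read DP_DSC_SUPPORT once and stash the caps ...
 */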