/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
struct drm_dp_pending_up_req {
        struct drm_dp_sideband_msg_hdr hdr;
        struct drm_dp_sideband_msg_req_body msg;
        struct list_head next;
};
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
                                  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
                                     int id,
                                     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port,
                                 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port,
                                  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_mst_branch *mstb,
                                           struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
                                 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
        static const char * const req_type_str[] = {
                DP_STR(GET_MSG_TRANSACTION_VERSION),
                DP_STR(LINK_ADDRESS),
                DP_STR(CONNECTION_STATUS_NOTIFY),
                DP_STR(ENUM_PATH_RESOURCES),
                DP_STR(ALLOCATE_PAYLOAD),
                DP_STR(QUERY_PAYLOAD),
                DP_STR(RESOURCE_STATUS_NOTIFY),
                DP_STR(CLEAR_PAYLOAD_ID_TABLE),
                DP_STR(REMOTE_DPCD_READ),
                DP_STR(REMOTE_DPCD_WRITE),
                DP_STR(REMOTE_I2C_READ),
                DP_STR(REMOTE_I2C_WRITE),
                DP_STR(POWER_UP_PHY),
                DP_STR(POWER_DOWN_PHY),
                DP_STR(SINK_EVENT_NOTIFY),
                DP_STR(QUERY_STREAM_ENC_STATUS),
        };

        if (req_type >= ARRAY_SIZE(req_type_str) ||
            !req_type_str[req_type])
                return "unknown";

        return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
        static const char * const nak_reason_str[] = {
                DP_STR(WRITE_FAILURE),
                DP_STR(INVALID_READ),
                DP_STR(CRC_FAILURE),
                DP_STR(BAD_PARAM),
                DP_STR(DEFER),
                DP_STR(LINK_FAILURE),
                DP_STR(NO_RESOURCES),
                DP_STR(DPCD_FAIL),
                DP_STR(I2C_NAK),
                DP_STR(ALLOCATE_FAIL),
        };

        if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
            !nak_reason_str[nak_reason])
                return "unknown";

        return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
        static const char * const sideband_reason_str[] = {
                DP_STR(QUEUED),
                DP_STR(START_SEND),
                DP_STR(SENT),
                DP_STR(RX),
                DP_STR(TIMEOUT),
        };

        if (state >= ARRAY_SIZE(sideband_reason_str) ||
            !sideband_reason_str[state])
                return "unknown";

        return sideband_reason_str[state];
}
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
        int i;
        u8 unpacked_rad[16];

        for (i = 0; i < lct; i++) {
                if (i % 2)
                        unpacked_rad[i] = rad[i / 2] >> 4;
                else
                        unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
        }

        /* TODO: Eventually add something to printk so we can format the rad
         * like this: 1.2.3
         */
        return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
/* sideband msg handling */
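/*
 * Sideband message headers are protected by a 4-bit CRC computed below as a
 * bitwise long division; the 0x10 test and 0x13 XOR correspond to the DP 1.2
 * sideband generator polynomial x^4 + x + 1.
 */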
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
        int number_of_bits = num_nibbles * 4;

        while (number_of_bits != 0) {
                remainder |= (data[array_index] & bitmask) >> bitshift;
                if ((remainder & 0x10) == 0x10)
                        remainder ^= 0x13;

        while (number_of_bits != 0) {
                if ((remainder & 0x10) != 0)
                        remainder ^= 0x13;
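/*
 * Message body chunks use an 8-bit CRC instead: the same long-division
 * scheme widened to a 9-bit remainder, with the 0x100 test and 0xd5 XOR
 * matching the DP spec's sideband data generator polynomial.
 */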
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
        int number_of_bits = number_of_bytes * 8;

        while (number_of_bits != 0) {
                remainder |= (data[array_index] & bitmask) >> bitshift;
                if ((remainder & 0x100) == 0x100)
                        remainder ^= 0xd5;

        while (number_of_bits != 0) {
                if ((remainder & 0x100) != 0)
                        remainder ^= 0xd5;

        return remainder & 0xff;
}
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
        u8 size = 3;

        size += (hdr->lct / 2);
        return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
                                           u8 *buf, int *len)
{
        int idx = 0;
        int i;
        u8 crc4;

        buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
        for (i = 0; i < (hdr->lct / 2); i++)
                buf[idx++] = hdr->rad[i];
        buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
                     (hdr->msg_len & 0x3f);
        buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

        crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
        buf[idx - 1] |= (crc4 & 0xf);

        *len = idx;
}
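/*
 * For reference, the wire layout produced above (and parsed below) is:
 *   byte 0:          LCT[7:4] | LCR[3:0]
 *   next lct/2:      RAD, one nibble per hop
 *   next byte:       Broadcast[7] | Path_Msg[6] | Msg_Body_Length[5:0]
 *   last byte:       SOMT[7] | EOMT[6] | Seqno[4] | header CRC nibble[3:0]
 */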
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
                                           u8 *buf, int buflen, u8 *hdrlen)
{
        u8 crc4, len, idx;
        int i;

        if (buf[0] == 0)
                return false;
        len = 3;
        len += ((buf[0] & 0xf0) >> 4) / 2;
        if (len > buflen)
                return false;
        crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

        if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
                DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
                return false;
        }

        hdr->lct = (buf[0] & 0xf0) >> 4;
        hdr->lcr = (buf[0] & 0xf);
        idx = 1;
        for (i = 0; i < (hdr->lct / 2); i++)
                hdr->rad[i] = buf[idx++];
        hdr->broadcast = (buf[idx] >> 7) & 0x1;
        hdr->path_msg = (buf[idx] >> 6) & 0x1;
        hdr->msg_len = buf[idx] & 0x3f;
        idx++;
        hdr->somt = (buf[idx] >> 7) & 0x1;
        hdr->eomt = (buf[idx] >> 6) & 0x1;
        hdr->seqno = (buf[idx] >> 4) & 0x1;
        idx++;
        *hdrlen = idx;
        return true;
}
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
                           struct drm_dp_sideband_msg_tx *raw)
{
        int idx = 0;
        int i;
        u8 *buf = raw->msg;

        buf[idx++] = req->req_type & 0x7f;

        switch (req->req_type) {
        case DP_ENUM_PATH_RESOURCES:
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
                idx++;
                break;
        case DP_ALLOCATE_PAYLOAD:
                buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
                        (req->u.allocate_payload.number_sdp_streams & 0xf);
                idx++;
                buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
                idx++;
                buf[idx] = (req->u.allocate_payload.pbn >> 8);
                idx++;
                buf[idx] = (req->u.allocate_payload.pbn & 0xff);
                idx++;
                for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
                        buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
                                (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
                        idx++;
                }
                if (req->u.allocate_payload.number_sdp_streams & 1) {
                        i = req->u.allocate_payload.number_sdp_streams - 1;
                        buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
                        idx++;
                }
                break;
        case DP_QUERY_PAYLOAD:
                buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
                idx++;
                buf[idx] = (req->u.query_payload.vcpi & 0x7f);
                idx++;
                break;
        case DP_REMOTE_DPCD_READ:
                buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
                buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
                idx++;
                buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
                idx++;
                buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
                idx++;
                buf[idx] = (req->u.dpcd_read.num_bytes);
                idx++;
                break;

        case DP_REMOTE_DPCD_WRITE:
                buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
                buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
                idx++;
                buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
                idx++;
                buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
                idx++;
                buf[idx] = (req->u.dpcd_write.num_bytes);
                idx++;
                memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
                idx += req->u.dpcd_write.num_bytes;
                break;
        case DP_REMOTE_I2C_READ:
                buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
                buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
                idx++;
                for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
                        buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
                        idx++;
                        buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
                        idx++;
                        memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
                        idx += req->u.i2c_read.transactions[i].num_bytes;
                        buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
                        buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
                        idx++;
                }
                buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
                idx++;
                buf[idx] = (req->u.i2c_read.num_bytes_read);
                idx++;
                break;

        case DP_REMOTE_I2C_WRITE:
                buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
                idx++;
                buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
                idx++;
                buf[idx] = (req->u.i2c_write.num_bytes);
                idx++;
                memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
                idx += req->u.i2c_write.num_bytes;
                break;
        }
        raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
                           struct drm_dp_sideband_msg_req_body *req)
{
        const u8 *buf = raw->msg;
        int i, idx = 0;

        req->req_type = buf[idx++] & 0x7f;
        switch (req->req_type) {
        case DP_ENUM_PATH_RESOURCES:
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
                break;
        case DP_ALLOCATE_PAYLOAD:
                {
                        struct drm_dp_allocate_payload *a =
                                &req->u.allocate_payload;

                        a->number_sdp_streams = buf[idx] & 0xf;
                        a->port_number = (buf[idx] >> 4) & 0xf;

                        WARN_ON(buf[++idx] & 0x80);
                        a->vcpi = buf[idx] & 0x7f;

                        a->pbn = buf[++idx] << 8;
                        a->pbn |= buf[++idx];

                        idx++;
                        for (i = 0; i < a->number_sdp_streams; i++) {
                                a->sdp_stream_sink[i] =
                                        (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
                        }
                }
                break;
        case DP_QUERY_PAYLOAD:
                req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
                WARN_ON(buf[++idx] & 0x80);
                req->u.query_payload.vcpi = buf[idx] & 0x7f;
                break;
        case DP_REMOTE_DPCD_READ:
                {
                        struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

                        r->port_number = (buf[idx] >> 4) & 0xf;

                        r->dpcd_address = (buf[idx] << 16) & 0xf0000;
                        r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
                        r->dpcd_address |= buf[++idx] & 0xff;

                        r->num_bytes = buf[++idx];
                }
                break;
        case DP_REMOTE_DPCD_WRITE:
                {
                        struct drm_dp_remote_dpcd_write *w =
                                &req->u.dpcd_write;

                        w->port_number = (buf[idx] >> 4) & 0xf;

                        w->dpcd_address = (buf[idx] << 16) & 0xf0000;
                        w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
                        w->dpcd_address |= buf[++idx] & 0xff;

                        w->num_bytes = buf[++idx];

                        w->bytes = kmemdup(&buf[++idx], w->num_bytes,
                                           GFP_KERNEL);
                        if (!w->bytes)
                                return -ENOMEM;
                }
                break;
        case DP_REMOTE_I2C_READ:
                {
                        struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
                        struct drm_dp_remote_i2c_read_tx *tx;
                        bool failed = false;

                        r->num_transactions = buf[idx] & 0x3;
                        r->port_number = (buf[idx] >> 4) & 0xf;
                        for (i = 0; i < r->num_transactions; i++) {
                                tx = &r->transactions[i];

                                tx->i2c_dev_id = buf[++idx] & 0x7f;
                                tx->num_bytes = buf[++idx];
                                tx->bytes = kmemdup(&buf[++idx],
                                                    tx->num_bytes,
                                                    GFP_KERNEL);
                                if (!tx->bytes) {
                                        failed = true;
                                        break;
                                }
                                idx += tx->num_bytes;
                                tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
                                tx->i2c_transaction_delay = buf[idx] & 0xf;
                        }

                        if (failed) {
                                for (i = 0; i < r->num_transactions; i++)
                                        kfree(r->transactions[i].bytes);
                                return -ENOMEM;
                        }

                        r->read_i2c_device_id = buf[++idx] & 0x7f;
                        r->num_bytes_read = buf[++idx];
                }
                break;
        case DP_REMOTE_I2C_WRITE:
                {
                        struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

                        w->port_number = (buf[idx] >> 4) & 0xf;
                        w->write_i2c_device_id = buf[++idx] & 0x7f;
                        w->num_bytes = buf[++idx];
                        w->bytes = kmemdup(&buf[++idx], w->num_bytes,
                                           GFP_KERNEL);
                        if (!w->bytes)
                                return -ENOMEM;
                }
                break;
        }

        return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
static void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
                                  int indent, struct drm_printer *printer)
{
        int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
        if (req->req_type == DP_LINK_ADDRESS) {
                /* No contents to print */
                P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
                return;
        }

        P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
        indent++;

        switch (req->req_type) {
        case DP_ENUM_PATH_RESOURCES:
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                P("port=%d\n", req->u.port_num.port_number);
                break;
        case DP_ALLOCATE_PAYLOAD:
                P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
                  req->u.allocate_payload.port_number,
                  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
                  req->u.allocate_payload.number_sdp_streams,
                  req->u.allocate_payload.number_sdp_streams,
                  req->u.allocate_payload.sdp_stream_sink);
                break;
        case DP_QUERY_PAYLOAD:
                P("port=%d vcpi=%d\n",
                  req->u.query_payload.port_number,
                  req->u.query_payload.vcpi);
                break;
        case DP_REMOTE_DPCD_READ:
                P("port=%d dpcd_addr=%05x len=%d\n",
                  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
                  req->u.dpcd_read.num_bytes);
                break;
        case DP_REMOTE_DPCD_WRITE:
                P("port=%d addr=%05x len=%d: %*ph\n",
                  req->u.dpcd_write.port_number,
                  req->u.dpcd_write.dpcd_address,
                  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
                  req->u.dpcd_write.bytes);
                break;
        case DP_REMOTE_I2C_READ:
                P("port=%d num_tx=%d id=%d size=%d:\n",
                  req->u.i2c_read.port_number,
                  req->u.i2c_read.num_transactions,
                  req->u.i2c_read.read_i2c_device_id,
                  req->u.i2c_read.num_bytes_read);

                indent++;
                for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
                        const struct drm_dp_remote_i2c_read_tx *rtx =
                                &req->u.i2c_read.transactions[i];

                        P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
                          i, rtx->i2c_dev_id, rtx->num_bytes,
                          rtx->no_stop_bit, rtx->i2c_transaction_delay,
                          rtx->num_bytes, rtx->bytes);
                }
                break;
        case DP_REMOTE_I2C_WRITE:
                P("port=%d id=%d size=%d: %*ph\n",
                  req->u.i2c_write.port_number,
                  req->u.i2c_write.write_i2c_device_id,
                  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
                  req->u.i2c_write.bytes);
                break;
        }
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
static void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
                                const struct drm_dp_sideband_msg_tx *txmsg)
{
        struct drm_dp_sideband_msg_req_body req;
        char buf[64];
        int ret;
        int i;

        drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
                              sizeof(buf));
        drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
                   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
                   drm_dp_mst_sideband_tx_state_str(txmsg->state),
                   txmsg->path_msg, buf);

        ret = drm_dp_decode_sideband_req(txmsg, &req);
        if (ret) {
                drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
                return;
        }
        drm_dp_dump_sideband_msg_req_body(&req, 1, p);

        switch (req.req_type) {
        case DP_REMOTE_DPCD_WRITE:
                kfree(req.u.dpcd_write.bytes);
                break;
        case DP_REMOTE_I2C_READ:
                for (i = 0; i < req.u.i2c_read.num_transactions; i++)
                        kfree(req.u.i2c_read.transactions[i].bytes);
                break;
        case DP_REMOTE_I2C_WRITE:
                kfree(req.u.i2c_write.bytes);
                break;
        }
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
        u8 crc4;

        crc4 = drm_dp_msg_data_crc4(msg, len);
        msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
                                         struct drm_dp_sideband_msg_tx *raw)
{
        int idx = 0;
        u8 *buf = raw->msg;

        buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

        raw->cur_len = idx;
}
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
                                      u8 *replybuf, u8 replybuflen, bool hdr)
{
        int ret;
        u8 crc4;

        if (hdr) {
                u8 hdrlen;
                struct drm_dp_sideband_msg_hdr recv_hdr;
                ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
                if (ret == false) {
                        print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
                        return false;
                }

                /*
                 * ignore out-of-order messages or messages that are part of a
                 * failed transaction
                 */
                if (!recv_hdr.somt && !msg->have_somt)
                        return false;

                /* get length contained in this portion */
                msg->curchunk_len = recv_hdr.msg_len;
                msg->curchunk_hdrlen = hdrlen;

                /* we have already gotten an SOMT - don't bother parsing */
                if (recv_hdr.somt && msg->have_somt)
                        return false;

                if (recv_hdr.somt) {
                        memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
                        msg->have_somt = true;
                }
                if (recv_hdr.eomt)
                        msg->have_eomt = true;

                /* copy the bytes for the remainder of this header chunk */
                msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
                memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
        } else {
                memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
                msg->curchunk_idx += replybuflen;
        }

        if (msg->curchunk_idx >= msg->curchunk_len) {
                crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
                /* copy chunk into bigger msg */
                memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
                msg->curlen += msg->curchunk_len - 1;
        }
        return true;
}
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
                                               struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;
        int i;

        memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
        idx += 16;
        repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        for (i = 0; i < repmsg->u.link_addr.nports; i++) {
                if (raw->msg[idx] & 0x80)
                        repmsg->u.link_addr.ports[i].input_port = 1;

                repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
                repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

                idx++;
                if (idx > raw->curlen)
                        goto fail_len;
                repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
                repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
                if (repmsg->u.link_addr.ports[i].input_port == 0)
                        repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
                idx++;
                if (idx > raw->curlen)
                        goto fail_len;
                if (repmsg->u.link_addr.ports[i].input_port == 0) {
                        repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
                        idx++;
                        if (idx > raw->curlen)
                                goto fail_len;
                        memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
                        idx += 16;
                        if (idx > raw->curlen)
                                goto fail_len;
                        repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
                        repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
                        idx++;
                }
                if (idx > raw->curlen)
                        goto fail_len;
        }

        return true;
fail_len:
        DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
                                                   struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
        idx++;
        if (idx > raw->curlen)
                goto fail_len;

        memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
        return true;
fail_len:
        DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
                                                    struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
                                                      struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
        idx++;
        memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
        return true;
fail_len:
        DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
                                                          struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
                                                       struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.allocate_payload.vcpi = raw->msg[idx];
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
                                                    struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
                                                       struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen) {
                DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
                              idx, raw->curlen);
                return false;
        }
        return true;
}
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                                        struct drm_dp_sideband_msg_reply_body *msg)
{
        memset(msg, 0, sizeof(*msg));
        msg->reply_type = (raw->msg[0] & 0x80) >> 7;
        msg->req_type = (raw->msg[0] & 0x7f);

        if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
                memcpy(msg->u.nak.guid, &raw->msg[1], 16);
                msg->u.nak.reason = raw->msg[17];
                msg->u.nak.nak_data = raw->msg[18];
                return false;
        }

        switch (msg->req_type) {
        case DP_LINK_ADDRESS:
                return drm_dp_sideband_parse_link_address(raw, msg);
        case DP_QUERY_PAYLOAD:
                return drm_dp_sideband_parse_query_payload_ack(raw, msg);
        case DP_REMOTE_DPCD_READ:
                return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
        case DP_REMOTE_DPCD_WRITE:
                return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
        case DP_REMOTE_I2C_READ:
                return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
        case DP_ENUM_PATH_RESOURCES:
                return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
        case DP_ALLOCATE_PAYLOAD:
                return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
        default:
                DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
                return false;
        }
}
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
                                                           struct drm_dp_sideband_msg_req_body *msg)
{
        int idx = 1;

        msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;

        memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
        idx += 16;
        if (idx > raw->curlen)
                goto fail_len;

        msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
        msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
        msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
        msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
        msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
        idx++;
        return true;
fail_len:
        DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
                                                         struct drm_dp_sideband_msg_req_body *msg)
{
        int idx = 1;

        msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;

        memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
        idx += 16;
        if (idx > raw->curlen)
                goto fail_len;

        msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
        idx++;
        return true;
fail_len:
        DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
                                      struct drm_dp_sideband_msg_req_body *msg)
{
        memset(msg, 0, sizeof(*msg));
        msg->req_type = (raw->msg[0] & 0x7f);

        switch (msg->req_type) {
        case DP_CONNECTION_STATUS_NOTIFY:
                return drm_dp_sideband_parse_connection_status_notify(raw, msg);
        case DP_RESOURCE_STATUS_NOTIFY:
                return drm_dp_sideband_parse_resource_status_notify(raw, msg);
        default:
                DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
                return false;
        }
}
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
        struct drm_dp_sideband_msg_req_body req;

        req.req_type = DP_REMOTE_DPCD_WRITE;
        req.u.dpcd_write.port_number = port_num;
        req.u.dpcd_write.dpcd_address = offset;
        req.u.dpcd_write.num_bytes = num_bytes;
        req.u.dpcd_write.bytes = bytes;
        drm_dp_encode_sideband_req(&req, msg);

        return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
        struct drm_dp_sideband_msg_req_body req;

        req.req_type = DP_LINK_ADDRESS;
        drm_dp_encode_sideband_req(&req, msg);
        return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
        struct drm_dp_sideband_msg_req_body req;

        req.req_type = DP_ENUM_PATH_RESOURCES;
        req.u.port_num.port_number = port_num;
        drm_dp_encode_sideband_req(&req, msg);
        msg->path_msg = true;
        return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
                                  u8 vcpi, uint16_t pbn,
                                  u8 number_sdp_streams,
                                  u8 *sdp_stream_sink)
{
        struct drm_dp_sideband_msg_req_body req;

        memset(&req, 0, sizeof(req));
        req.req_type = DP_ALLOCATE_PAYLOAD;
        req.u.allocate_payload.port_number = port_num;
        req.u.allocate_payload.vcpi = vcpi;
        req.u.allocate_payload.pbn = pbn;
        req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
        memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
               number_sdp_streams);
        drm_dp_encode_sideband_req(&req, msg);
        msg->path_msg = true;
        return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
                                  int port_num, bool power_up)
{
        struct drm_dp_sideband_msg_req_body req;

        if (power_up)
                req.req_type = DP_POWER_UP_PHY;
        else
                req.req_type = DP_POWER_DOWN_PHY;

        req.u.port_num.port_number = port_num;
        drm_dp_encode_sideband_req(&req, msg);
        msg->path_msg = true;
        return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_dp_vcpi *vcpi)
{
        int ret, vcpi_ret;

        mutex_lock(&mgr->payload_lock);
        ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
        if (ret > mgr->max_payloads) {
                ret = -EINVAL;
                DRM_DEBUG_KMS("out of payload ids %d\n", ret);
                goto out_unlock;
        }

        vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
        if (vcpi_ret > mgr->max_payloads) {
                ret = -EINVAL;
                DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
                goto out_unlock;
        }

        set_bit(ret, &mgr->payload_mask);
        set_bit(vcpi_ret, &mgr->vcpi_mask);
        vcpi->vcpi = vcpi_ret + 1;
        mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
        mutex_unlock(&mgr->payload_lock);
        return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                      int vcpi)
{
        int i;

        if (vcpi == 0)
                return;

        mutex_lock(&mgr->payload_lock);
        DRM_DEBUG_KMS("putting payload %d\n", vcpi);
        clear_bit(vcpi - 1, &mgr->vcpi_mask);

        for (i = 0; i < mgr->max_payloads; i++) {
                if (mgr->proposed_vcpis[i] &&
                    mgr->proposed_vcpis[i]->vcpi == vcpi) {
                        mgr->proposed_vcpis[i] = NULL;
                        clear_bit(i + 1, &mgr->payload_mask);
                }
        }
        mutex_unlock(&mgr->payload_lock);
}
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_sideband_msg_tx *txmsg)
{
        unsigned int state;

        /*
         * All updates to txmsg->state are protected by mgr->qlock, and the two
         * cases we check here are terminal states. For those the barriers
         * provided by the wake_up/wait_event pair are enough.
         */
        state = READ_ONCE(txmsg->state);
        return (state == DRM_DP_SIDEBAND_TX_RX ||
                state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
                                    struct drm_dp_sideband_msg_tx *txmsg)
{
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        int ret;

        ret = wait_event_timeout(mgr->tx_waitq,
                                 check_txmsg_state(mgr, txmsg),
                                 (4 * HZ));
        mutex_lock(&mstb->mgr->qlock);
        if (ret > 0) {
                if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
                        ret = -EIO;
                        goto out;
                }
        } else {
                DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

                /* dump some state */
                ret = -EIO;

                /* remove from q */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
                    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
                        list_del(&txmsg->next);
                }

                if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
                    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
                        mstb->tx_slots[txmsg->seqno] = NULL;
                }
                mgr->is_waiting_for_dwn_reply = false;
        }
out:
        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
                struct drm_printer p = drm_debug_printer(DBG_PREFIX);

                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
        mutex_unlock(&mgr->qlock);

        drm_dp_mst_kick_tx(mgr);
        return ret;
}
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
        struct drm_dp_mst_branch *mstb;

        mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
        if (!mstb)
                return NULL;

        mstb->lct = lct;
        if (lct > 1)
                memcpy(mstb->rad, rad, lct / 2);
        INIT_LIST_HEAD(&mstb->ports);
        kref_init(&mstb->topology_kref);
        kref_init(&mstb->malloc_kref);
        return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
        struct drm_dp_mst_branch *mstb =
                container_of(kref, struct drm_dp_mst_branch, malloc_kref);

        if (mstb->port_parent)
                drm_dp_mst_put_port_malloc(mstb->port_parent);

        kfree(mstb);
}
/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
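/*
 * A minimal sketch of the driver-side half of this scheme (illustrative
 * only; example_create_connector() stands in for a driver's own code)::
 *
 *	static struct drm_connector *
 *	example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = example_create_connector(mgr, port, path);
 *		if (!connector)
 *			return NULL;
 *
 *		// Pin the port's memory for as long as our connector uses it;
 *		// topology refs stay internal to the helpers, malloc refs are
 *		// the driver's responsibility.
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 * with the matching drm_dp_mst_put_port_malloc() call placed in the
 * connector's destroy callback.
 */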
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
        kref_get(&mstb->malloc_kref);
        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
        kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
        struct drm_dp_mst_port *port =
                container_of(kref, struct drm_dp_mst_port, malloc_kref);

        drm_dp_mst_put_mstb_malloc(port->parent);
        kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
        kref_get(&port->malloc_kref);
        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
        kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
                    struct drm_dp_mst_topology_ref_history *history,
                    enum drm_dp_mst_topology_ref_type type)
{
        struct drm_dp_mst_topology_ref_entry *entry = NULL;
        depot_stack_handle_t backtrace;
        ulong stack_entries[STACK_DEPTH];
        uint n;
        int i;

        n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
        backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
        if (!backtrace)
                return;

        /* Try to find an existing entry for this backtrace */
        for (i = 0; i < history->len; i++) {
                if (history->entries[i].backtrace == backtrace) {
                        entry = &history->entries[i];
                        break;
                }
        }

        /* Otherwise add one */
        if (!entry) {
                struct drm_dp_mst_topology_ref_entry *new;
                int new_len = history->len + 1;

                new = krealloc(history->entries, sizeof(*new) * new_len,
                               GFP_KERNEL);
                if (!new)
                        return;

                entry = &new[history->len];
                history->len = new_len;
                history->entries = new;

                entry->backtrace = backtrace;
                entry->type = type;
                entry->count = 0;
        }
        entry->count++;
        entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
        const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

        if (entry_a->ts_nsec > entry_b->ts_nsec)
                return 1;
        else if (entry_a->ts_nsec < entry_b->ts_nsec)
                return -1;
        else
                return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
        if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
                return "get";
        else
                return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
                            void *ptr, const char *type_str)
{
        struct drm_printer p = drm_debug_printer(DBG_PREFIX);
        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        int i;

        if (!buf)
                return;

        if (!history->len)
                goto out;

        /* First, sort the list so that it goes from oldest to newest
         * reference entry
         */
        sort(history->entries, history->len, sizeof(*history->entries),
             topology_ref_history_cmp, NULL);

        drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
                   type_str, ptr);

        for (i = 0; i < history->len; i++) {
                const struct drm_dp_mst_topology_ref_entry *entry =
                        &history->entries[i];
                ulong *entries;
                uint nr_entries;
                u64 ts_nsec = entry->ts_nsec;
                u32 rem_nsec = do_div(ts_nsec, 1000000000);

                nr_entries = stack_depot_fetch(entry->backtrace, &entries);
                stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

                drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
                           entry->count,
                           topology_ref_type_to_str(entry->type),
                           ts_nsec, rem_nsec / 1000, buf);
        }

        /* Now free the history, since this is the only time we expose it */
        kfree(history->entries);
out:
        kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
        __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
                                    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
        __dump_topology_ref_history(&port->topology_ref_history, port,
                                    "port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
                       enum drm_dp_mst_topology_ref_type type)
{
        __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
                       enum drm_dp_mst_topology_ref_type type)
{
        __topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
        mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
        mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
        struct drm_dp_mst_branch *mstb =
                container_of(kref, struct drm_dp_mst_branch, topology_kref);
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

        drm_dp_mst_dump_mstb_topology_history(mstb);

        INIT_LIST_HEAD(&mstb->destroy_next);

        /*
         * This can get called under mgr->mutex, so we need to perform the
         * actual destruction of the mstb in another worker
         */
        mutex_lock(&mgr->delayed_destroy_lock);
        list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
        mutex_unlock(&mgr->delayed_destroy_lock);
        schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
        int ret;

        topology_ref_history_lock(mstb->mgr);
        ret = kref_get_unless_zero(&mstb->topology_kref);
        if (ret) {
                DRM_DEBUG("mstb %p (%d)\n",
                          mstb, kref_read(&mstb->topology_kref));
                save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
        }

        topology_ref_history_unlock(mstb->mgr);

        return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
        topology_ref_history_lock(mstb->mgr);

        save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
        WARN_ON(kref_read(&mstb->topology_kref) == 0);
        kref_get(&mstb->topology_kref);
        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

        topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
        topology_ref_history_lock(mstb->mgr);

        DRM_DEBUG("mstb %p (%d)\n",
                  mstb, kref_read(&mstb->topology_kref) - 1);
        save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

        topology_ref_history_unlock(mstb->mgr);
        kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
static void drm_dp_destroy_port(struct kref *kref)
{
        struct drm_dp_mst_port *port =
                container_of(kref, struct drm_dp_mst_port, topology_kref);
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;

        drm_dp_mst_dump_port_topology_history(port);

        /* There's nothing that needs locking to destroy an input port yet */
        if (port->input) {
                drm_dp_mst_put_port_malloc(port);
                return;
        }

        kfree(port->cached_edid);

        /*
         * we can't destroy the connector here, as we might be holding the
         * mode_config.mutex from an EDID retrieval
         */
        mutex_lock(&mgr->delayed_destroy_lock);
        list_add(&port->next, &mgr->destroy_port_list);
        mutex_unlock(&mgr->delayed_destroy_lock);
        schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
        int ret;

        topology_ref_history_lock(port->mgr);
        ret = kref_get_unless_zero(&port->topology_kref);
        if (ret) {
                DRM_DEBUG("port %p (%d)\n",
                          port, kref_read(&port->topology_kref));
                save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
        }

        topology_ref_history_unlock(port->mgr);
        return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
        topology_ref_history_lock(port->mgr);

        WARN_ON(kref_read(&port->topology_kref) == 0);
        kref_get(&port->topology_kref);
        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
        save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);

        topology_ref_history_unlock(port->mgr);
}

/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
        topology_ref_history_lock(port->mgr);

        DRM_DEBUG("port %p (%d)\n",
                  port, kref_read(&port->topology_kref) - 1);
        save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);

        topology_ref_history_unlock(port->mgr);
        kref_put(&port->topology_kref, drm_dp_destroy_port);
}
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
                                              struct drm_dp_mst_branch *to_find)
{
        struct drm_dp_mst_port *port;
        struct drm_dp_mst_branch *rmstb;

        if (to_find == mstb)
                return mstb;

        list_for_each_entry(port, &mstb->ports, next) {
                if (port->mstb) {
                        rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
                            port->mstb, to_find);
                        if (rmstb)
                                return rmstb;
                }
        }
        return NULL;
}

static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
                                       struct drm_dp_mst_branch *mstb)
{
        struct drm_dp_mst_branch *rmstb = NULL;

        mutex_lock(&mgr->lock);
        if (mgr->mst_primary) {
                rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
                    mgr->mst_primary, mstb);

                if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
                        rmstb = NULL;
        }
        mutex_unlock(&mgr->lock);
        return rmstb;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
                                              struct drm_dp_mst_port *to_find)
{
        struct drm_dp_mst_port *port, *mport;

        list_for_each_entry(port, &mstb->ports, next) {
                if (port == to_find)
                        return port;

                if (port->mstb) {
                        mport = drm_dp_mst_topology_get_port_validated_locked(
                            port->mstb, to_find);
                        if (mport)
                                return mport;
                }
        }
        return NULL;
}

static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
                                       struct drm_dp_mst_port *port)
{
        struct drm_dp_mst_port *rport = NULL;

        mutex_lock(&mgr->lock);
        if (mgr->mst_primary) {
                rport = drm_dp_mst_topology_get_port_validated_locked(
                    mgr->mst_primary, port);

                if (rport && !drm_dp_mst_topology_try_get_port(rport))
                        rport = NULL;
        }
        mutex_unlock(&mgr->lock);
        return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
        struct drm_dp_mst_port *port;
        int ret;

        list_for_each_entry(port, &mstb->ports, next) {
                if (port->port_num == port_num) {
                        ret = drm_dp_mst_topology_try_get_port(port);
                        return ret ? port : NULL;
                }
        }

        return NULL;
}
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
                               u8 *rad)
{
        int parent_lct = port->parent->lct;
        int shift = 4;
        int idx = (parent_lct - 1) / 2;

        if (parent_lct > 1) {
                memcpy(rad, port->parent->rad, idx + 1);
                shift = (parent_lct % 2) ? 4 : 0;
        } else
                rad[0] = 0;

        rad[idx] |= port->port_num << shift;
        return parent_lct + 1;
}
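/*
 * Worked example: a port with port_num 8 whose parent branch sits at LCT 2
 * with rad[0] == 0x10 (reached through port 1 of the primary branch) gets
 * idx = 0 and shift = 0, so the new device ends up with LCT 3 and
 * rad[0] == 0x18 - hop nibbles packed high-to-low as 1, then 8.
 */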
static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
{
        switch (pdt) {
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
        case DP_PEER_DEVICE_SST_SINK:
                return true;
        case DP_PEER_DEVICE_MST_BRANCHING:
                /* For sst branch device */
                if (!mcs)
                        return true;

                return false;
        }
        return true;
}

static int
drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
                    bool new_mcs)
{
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
        struct drm_dp_mst_branch *mstb;
        u8 rad[8], lct;
        int ret = 0;

        if (port->pdt == new_pdt && port->mcs == new_mcs)
                return 0;

        /* Teardown the old pdt, if there is one */
        if (port->pdt != DP_PEER_DEVICE_NONE) {
                if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
                        /*
                         * If the new PDT would also have an i2c bus,
                         * don't bother with reregistering it
                         */
                        if (new_pdt != DP_PEER_DEVICE_NONE &&
                            drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
                                port->pdt = new_pdt;
                                port->mcs = new_mcs;
                                return 0;
                        }

                        /* remove i2c over sideband */
                        drm_dp_mst_unregister_i2c_bus(&port->aux);
                } else {
                        mutex_lock(&mgr->lock);
                        drm_dp_mst_topology_put_mstb(port->mstb);
                        port->mstb = NULL;
                        mutex_unlock(&mgr->lock);
                }
        }

        port->pdt = new_pdt;
        port->mcs = new_mcs;

        if (port->pdt != DP_PEER_DEVICE_NONE) {
                if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
                        /* add i2c over sideband */
                        ret = drm_dp_mst_register_i2c_bus(&port->aux);
                } else {
                        lct = drm_dp_calculate_rad(port, rad);
                        mstb = drm_dp_add_mst_branch_device(lct, rad);
                        if (!mstb) {
                                ret = -ENOMEM;
                                DRM_ERROR("Failed to create MSTB for port %p",
                                          port);
                                goto out;
                        }

                        mutex_lock(&mgr->lock);
                        port->mstb = mstb;
                        mstb->mgr = port->mgr;
                        mstb->port_parent = port;

                        /*
                         * Make sure this port's memory allocation stays
                         * around until its child MSTB releases it
                         */
                        drm_dp_mst_get_port_malloc(port);
                        mutex_unlock(&mgr->lock);

                        /* And make sure we send a link address for this */
                        ret = 1;
                }
        }

out:
        if (ret < 0)
                port->pdt = DP_PEER_DEVICE_NONE;
        return ret;
}
/**
 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to read
 * @buffer: buffer to store the register values
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_read() does for local
 * devices via actual AUX CH.
 *
 * Return: Number of bytes read, or negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
                             unsigned int offset, void *buffer, size_t size)
{
        struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
                                                    aux);

        return drm_dp_send_dpcd_read(port->mgr, port,
                                     offset, size, buffer);
}
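/*
 * Note: drivers normally do not call this directly. DPCD accesses through a
 * remote port's &drm_dp_aux (which has is_remote set, see
 * drm_dp_mst_add_port()) should be routed here by the core
 * drm_dp_dpcd_read() helper instead.
 */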
/**
 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
 * @aux: Fake sideband AUX CH
 * @offset: address of the (first) register to write
 * @buffer: buffer containing the values to write
 * @size: number of bytes in @buffer
 *
 * Performs the same functionality for remote devices via
 * sideband messaging as drm_dp_dpcd_write() does for local
 * devices via actual AUX CH.
 *
 * Return: 0 on success, negative error code on failure.
 */
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
                              unsigned int offset, void *buffer, size_t size)
{
        struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
                                                    aux);

        return drm_dp_send_dpcd_write(port->mgr, port,
                                      offset, size, buffer);
}
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
        int ret;

        memcpy(mstb->guid, guid, 16);

        if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
                if (mstb->port_parent) {
                        ret = drm_dp_send_dpcd_write(mstb->mgr,
                                                     mstb->port_parent,
                                                     DP_GUID, 16, mstb->guid);
                } else {
                        ret = drm_dp_dpcd_write(mstb->mgr->aux,
                                                DP_GUID, mstb->guid, 16);
                }
        }
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
                                int pnum,
                                char *proppath,
                                size_t proppath_size)
{
        int i;
        char temp[8];

        snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
        for (i = 0; i < (mstb->lct - 1); i++) {
                int shift = (i % 2) ? 0 : 4;
                int port_num = (mstb->rad[i / 2] >> shift) & 0xf;

                snprintf(temp, sizeof(temp), "-%d", port_num);
                strlcat(proppath, temp, proppath_size);
        }
        snprintf(temp, sizeof(temp), "-%d", pnum);
        strlcat(proppath, temp, proppath_size);
}
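/*
 * Example: with conn_base_id 30, a port numbered 8 on a branch at LCT 2 that
 * was reached through port 1 of the primary branch yields the property path
 * "mst:30-1-8".
 */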
2101 * drm_dp_mst_connector_late_register() - Late MST connector registration
2102 * @connector: The MST connector
2103 * @port: The MST port for this connector
2105 * Helper to register the remote aux device for this MST port. Drivers should
2106 * call this from their mst connector's late_register hook to enable MST aux
2109 * Return: 0 on success, negative error code on failure.
2111 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2112 struct drm_dp_mst_port *port)
2114 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2115 port->aux.name, connector->kdev->kobj.name);
2117 port->aux.dev = connector->kdev;
2118 return drm_dp_aux_register_devnode(&port->aux);
2120 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2123 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2124 * @connector: The MST connector
2125 * @port: The MST port for this connector
2127 * Helper to unregister the remote aux device for this MST port, registered by
2128 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2129 * connector's early_unregister hook.
2131 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2132 struct drm_dp_mst_port *port)
2134 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2135 port->aux.name, connector->kdev->kobj.name);
2136 drm_dp_aux_unregister_devnode(&port->aux);
2138 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
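/*
 * Usage sketch (hypothetical driver code, mydrv_* names are made up): both
 * helpers are meant to be wired into the MST connector's
 * &drm_connector_funcs so the remote aux devnode follows the connector's
 * lifetime:
 *
 *	static int mydrv_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct mydrv_mst_connector *c = to_mydrv_mst(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void mydrv_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct mydrv_mst_connector *c = to_mydrv_mst(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 */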
2141 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2142 struct drm_dp_mst_port *port)
2144 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2148 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2149 port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2150 if (!port->connector) {
2155 if (port->pdt != DP_PEER_DEVICE_NONE &&
2156 drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
2157 port->cached_edid = drm_get_edid(port->connector,
2159 drm_connector_set_tile_property(port->connector);
2162 mgr->cbs->register_connector(port->connector);
2166 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2170 * Drop a topology reference, and unlink the port from the in-memory topology
2174 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2175 struct drm_dp_mst_port *port)
2177 mutex_lock(&mgr->lock);
2178 list_del(&port->next);
2179 mutex_unlock(&mgr->lock);
2180 drm_dp_mst_topology_put_port(port);
2183 static struct drm_dp_mst_port *
2184 drm_dp_mst_add_port(struct drm_device *dev,
2185 struct drm_dp_mst_topology_mgr *mgr,
2186 struct drm_dp_mst_branch *mstb, u8 port_number)
2188 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2193 kref_init(&port->topology_kref);
2194 kref_init(&port->malloc_kref);
2195 port->parent = mstb;
2196 port->port_num = port_number;
2198 port->aux.name = "DPMST";
2199 port->aux.dev = dev->dev;
2200 port->aux.is_remote = true;
2203 * Make sure the memory allocation for our parent branch stays
2204 * around until our own memory allocation is released
2206 drm_dp_mst_get_mstb_malloc(mstb);
2212 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2213 struct drm_device *dev,
2214 struct drm_dp_link_addr_reply_port *port_msg)
2216 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2217 struct drm_dp_mst_port *port;
2218 int old_ddps = 0, ret;
2219 u8 new_pdt = DP_PEER_DEVICE_NONE;
2221 bool created = false, send_link_addr = false, changed = false;
2223 port = drm_dp_get_port(mstb, port_msg->port_number);
2225 port = drm_dp_mst_add_port(dev, mgr, mstb,
2226 port_msg->port_number);
2231 } else if (!port->input && port_msg->input_port && port->connector) {
2232 /* Since port->connector can't be changed here, we create a
2233 * new port if input_port changes from 0 to 1
2235 drm_dp_mst_topology_unlink_port(mgr, port);
2236 drm_dp_mst_topology_put_port(port);
2237 port = drm_dp_mst_add_port(dev, mgr, mstb,
2238 port_msg->port_number);
2243 } else if (port->input && !port_msg->input_port) {
2245 } else if (port->connector) {
2246 /* We're updating a port that's exposed to userspace, so do it under lock */
2249 drm_modeset_lock(&mgr->base.lock, NULL);
2251 old_ddps = port->ddps;
2252 changed = port->ddps != port_msg->ddps ||
2254 (port->ldps != port_msg->legacy_device_plug_status ||
2255 port->dpcd_rev != port_msg->dpcd_revision ||
2256 port->mcs != port_msg->mcs ||
2257 port->pdt != port_msg->peer_device_type ||
2258 port->num_sdp_stream_sinks !=
2259 port_msg->num_sdp_stream_sinks));
2262 port->input = port_msg->input_port;
2264 new_pdt = port_msg->peer_device_type;
2265 new_mcs = port_msg->mcs;
2266 port->ddps = port_msg->ddps;
2267 port->ldps = port_msg->legacy_device_plug_status;
2268 port->dpcd_rev = port_msg->dpcd_revision;
2269 port->num_sdp_streams = port_msg->num_sdp_streams;
2270 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2272 /* manage mstb port lists with mgr lock - take a reference for this port */
2275 mutex_lock(&mgr->lock);
2276 drm_dp_mst_topology_get_port(port);
2277 list_add(&port->next, &mstb->ports);
2278 mutex_unlock(&mgr->lock);
2281 if (old_ddps != port->ddps) {
2284 drm_dp_send_enum_path_resources(mgr, mstb,
2288 port->available_pbn = 0;
2292 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2294 send_link_addr = true;
2295 } else if (ret < 0) {
2296 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2302 * If this port wasn't just created, then we're reprobing because
2303 * we're coming out of suspend. In this case, always resend the link
2304 * address if there's an MSTB on this port
2306 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2308 send_link_addr = true;
2310 if (port->connector)
2311 drm_modeset_unlock(&mgr->base.lock);
2312 else if (!port->input)
2313 drm_dp_mst_port_add_connector(mstb, port);
2315 if (send_link_addr && port->mstb) {
2316 ret = drm_dp_send_link_address(mgr, port->mstb);
2317 if (ret == 1) /* MSTB below us changed */
2323 /* put reference to this port */
2324 drm_dp_mst_topology_put_port(port);
2328 drm_dp_mst_topology_unlink_port(mgr, port);
2329 if (port->connector)
2330 drm_modeset_unlock(&mgr->base.lock);
2332 drm_dp_mst_topology_put_port(port);
2337 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2338 struct drm_dp_connection_status_notify *conn_stat)
2340 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2341 struct drm_dp_mst_port *port;
2342 int old_ddps, old_input, ret, i;
2345 bool dowork = false, create_connector = false;
2347 port = drm_dp_get_port(mstb, conn_stat->port_number);
2351 if (port->connector) {
2352 if (!port->input && conn_stat->input_port) {
2354 * We can't remove a connector from an already exposed
2355 * port, so just throw the port out and make sure we
2356 * reprobe the link address of its parent MSTB
2358 drm_dp_mst_topology_unlink_port(mgr, port);
2359 mstb->link_address_sent = false;
2364 /* Locking is only needed if the port's exposed to userspace */
2365 drm_modeset_lock(&mgr->base.lock, NULL);
2366 } else if (port->input && !conn_stat->input_port) {
2367 create_connector = true;
2368 /* Reprobe link address so we get num_sdp_streams */
2369 mstb->link_address_sent = false;
2373 old_ddps = port->ddps;
2374 old_input = port->input;
2375 port->input = conn_stat->input_port;
2376 port->ldps = conn_stat->legacy_device_plug_status;
2377 port->ddps = conn_stat->displayport_device_plug_status;
2379 if (old_ddps != port->ddps) {
2383 port->available_pbn = 0;
2387 new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2388 new_mcs = conn_stat->message_capability_status;
2389 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2392 } else if (ret < 0) {
2393 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2398 if (!old_input && old_ddps != port->ddps && !port->ddps) {
2399 for (i = 0; i < mgr->max_payloads; i++) {
2400 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2401 struct drm_dp_mst_port *port_validated;
2407 container_of(vcpi, struct drm_dp_mst_port, vcpi);
2409 drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2410 if (!port_validated) {
2411 mutex_lock(&mgr->payload_lock);
2412 vcpi->num_slots = 0;
2413 mutex_unlock(&mgr->payload_lock);
2415 drm_dp_mst_topology_put_port(port_validated);
2420 if (port->connector)
2421 drm_modeset_unlock(&mgr->base.lock);
2422 else if (create_connector)
2423 drm_dp_mst_port_add_connector(mstb, port);
2426 drm_dp_mst_topology_put_port(port);
2428 queue_work(system_long_wq, &mstb->mgr->work);
2431 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2434 struct drm_dp_mst_branch *mstb;
2435 struct drm_dp_mst_port *port;
2437 /* find the port by iterating down */
2439 mutex_lock(&mgr->lock);
2440 mstb = mgr->mst_primary;
2445 for (i = 0; i < lct - 1; i++) {
2446 int shift = (i % 2) ? 0 : 4;
2447 int port_num = (rad[i / 2] >> shift) & 0xf;
2449 list_for_each_entry(port, &mstb->ports, next) {
2450 if (port->port_num == port_num) {
2453 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2461 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2465 mutex_unlock(&mgr->lock);
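/*
 * Worked RAD example (illustrative): for lct 3 with rad[0] == 0x18, the loop
 * above reads the high nibble first, so the walk from the primary branch goes
 * through port 1, then port 8 of the branch below it.
 */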
2469 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2470 struct drm_dp_mst_branch *mstb,
2471 const uint8_t *guid)
2473 struct drm_dp_mst_branch *found_mstb;
2474 struct drm_dp_mst_port *port;
2476 if (memcmp(mstb->guid, guid, 16) == 0)
2480 list_for_each_entry(port, &mstb->ports, next) {
2484 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2493 static struct drm_dp_mst_branch *
2494 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2495 const uint8_t *guid)
2497 struct drm_dp_mst_branch *mstb;
2500 /* find the branch device by iterating down */
2501 mutex_lock(&mgr->lock);
2503 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2505 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2510 mutex_unlock(&mgr->lock);
2514 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2515 struct drm_dp_mst_branch *mstb)
2517 struct drm_dp_mst_port *port;
2519 bool changed = false;
2521 if (!mstb->link_address_sent) {
2522 ret = drm_dp_send_link_address(mgr, mstb);
2529 list_for_each_entry(port, &mstb->ports, next) {
2530 struct drm_dp_mst_branch *mstb_child = NULL;
2532 if (port->input || !port->ddps)
2535 if (!port->available_pbn) {
2536 drm_modeset_lock(&mgr->base.lock, NULL);
2537 drm_dp_send_enum_path_resources(mgr, mstb, port);
2538 drm_modeset_unlock(&mgr->base.lock);
2543 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2547 ret = drm_dp_check_and_send_link_address(mgr,
2549 drm_dp_mst_topology_put_mstb(mstb_child);
2560 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2562 struct drm_dp_mst_topology_mgr *mgr =
2563 container_of(work, struct drm_dp_mst_topology_mgr, work);
2564 struct drm_device *dev = mgr->dev;
2565 struct drm_dp_mst_branch *mstb;
2568 mutex_lock(&mgr->probe_lock);
2570 mutex_lock(&mgr->lock);
2571 mstb = mgr->mst_primary;
2573 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2577 mutex_unlock(&mgr->lock);
2579 mutex_unlock(&mgr->probe_lock);
2583 ret = drm_dp_check_and_send_link_address(mgr, mstb);
2584 drm_dp_mst_topology_put_mstb(mstb);
2586 mutex_unlock(&mgr->probe_lock);
2588 drm_kms_helper_hotplug_event(dev);
2591 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2596 if (memchr_inv(guid, 0, 16))
2599 salt = get_jiffies_64();
2601 memcpy(&guid[0], &salt, sizeof(u64));
2602 memcpy(&guid[8], &salt, sizeof(u64));
2607 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2609 struct drm_dp_sideband_msg_req_body req;
2611 req.req_type = DP_REMOTE_DPCD_READ;
2612 req.u.dpcd_read.port_number = port_num;
2613 req.u.dpcd_read.dpcd_address = offset;
2614 req.u.dpcd_read.num_bytes = num_bytes;
2615 drm_dp_encode_sideband_req(&req, msg);
2620 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2621 bool up, u8 *msg, int len)
2624 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2625 int tosend, total, offset;
2632 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2634 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2637 if (ret != tosend) {
2638 if (ret == -EIO && retries < 5) {
2642 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2648 } while (total > 0);
2652 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2653 struct drm_dp_sideband_msg_tx *txmsg)
2655 struct drm_dp_mst_branch *mstb = txmsg->dst;
2658 /* both msg slots are full */
2659 if (txmsg->seqno == -1) {
2660 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2661 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2664 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2665 txmsg->seqno = mstb->last_seqno;
2666 mstb->last_seqno ^= 1;
2667 } else if (mstb->tx_slots[0] == NULL)
2671 mstb->tx_slots[txmsg->seqno] = txmsg;
2674 req_type = txmsg->msg[0] & 0x7f;
2675 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2676 req_type == DP_RESOURCE_STATUS_NOTIFY)
2680 hdr->path_msg = txmsg->path_msg;
2681 hdr->lct = mstb->lct;
2682 hdr->lcr = mstb->lct - 1;
2684 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2685 hdr->seqno = txmsg->seqno;
2689 * process a single block of the next message in the sideband queue
2691 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2692 struct drm_dp_sideband_msg_tx *txmsg,
2696 struct drm_dp_sideband_msg_hdr hdr;
2697 int len, space, idx, tosend;
2700 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2702 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2704 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2707 /* make hdr from dst mst - for replies use seqno,
2708  * otherwise assign one */
2709 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2713 /* amount left to send in this message */
2714 len = txmsg->cur_len - txmsg->cur_offset;
2716 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2717 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2719 tosend = min(len, space);
2720 if (len == txmsg->cur_len)
2726 hdr.msg_len = tosend + 1;
2727 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2728 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2729 /* add crc at end */
2730 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2733 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2734 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2735 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2737 drm_printf(&p, "sideband msg failed to send\n");
2738 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2742 txmsg->cur_offset += tosend;
2743 if (txmsg->cur_offset == txmsg->cur_len) {
2744 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2750 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2752 struct drm_dp_sideband_msg_tx *txmsg;
2755 WARN_ON(!mutex_is_locked(&mgr->qlock));
2757 /* construct a chunk from the first msg in the tx_msg queue */
2758 if (list_empty(&mgr->tx_msg_downq))
2761 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2762 ret = process_single_tx_qlock(mgr, txmsg, false);
2764 /* txmsg is sent, so it should be in the slots now */
2765 mgr->is_waiting_for_dwn_reply = true;
2766 list_del(&txmsg->next);
2768 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2769 mgr->is_waiting_for_dwn_reply = false;
2770 list_del(&txmsg->next);
2771 if (txmsg->seqno != -1)
2772 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2773 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2774 wake_up_all(&mgr->tx_waitq);
2778 /* called holding qlock */
2779 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2780 struct drm_dp_sideband_msg_tx *txmsg)
2784 /* send the up reply msg we were handed, chunk by chunk */
2785 ret = process_single_tx_qlock(mgr, txmsg, true);
2788 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2790 if (txmsg->seqno != -1) {
2791 WARN_ON((unsigned int)txmsg->seqno >
2792 ARRAY_SIZE(txmsg->dst->tx_slots));
2793 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2797 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2798 struct drm_dp_sideband_msg_tx *txmsg)
2800 mutex_lock(&mgr->qlock);
2801 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2803 if (drm_debug_enabled(DRM_UT_DP)) {
2804 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2806 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2809 if (list_is_singular(&mgr->tx_msg_downq) &&
2810 !mgr->is_waiting_for_dwn_reply)
2811 process_single_down_tx_qlock(mgr);
2812 mutex_unlock(&mgr->qlock);
2816 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2818 struct drm_dp_link_addr_reply_port *port_reply;
2821 for (i = 0; i < reply->nports; i++) {
2822 port_reply = &reply->ports[i];
2823 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2825 port_reply->input_port,
2826 port_reply->peer_device_type,
2827 port_reply->port_number,
2828 port_reply->dpcd_revision,
2831 port_reply->legacy_device_plug_status,
2832 port_reply->num_sdp_streams,
2833 port_reply->num_sdp_stream_sinks);
2837 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2838 struct drm_dp_mst_branch *mstb)
2840 struct drm_dp_sideband_msg_tx *txmsg;
2841 struct drm_dp_link_address_ack_reply *reply;
2842 struct drm_dp_mst_port *port, *tmp;
2843 int i, len, ret, port_mask = 0;
2844 bool changed = false;
2846 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2851 len = build_link_address(txmsg);
2853 mstb->link_address_sent = true;
2854 drm_dp_queue_down_tx(mgr, txmsg);
2856 /* FIXME: Actually do some real error handling here */
2857 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2859 DRM_ERROR("Sending link address failed with %d\n", ret);
2862 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2863 DRM_ERROR("link address NAK received\n");
2868 reply = &txmsg->reply.u.link_addr;
2869 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2870 drm_dp_dump_link_address(reply);
2872 drm_dp_check_mstb_guid(mstb, reply->guid);
2874 for (i = 0; i < reply->nports; i++) {
2875 port_mask |= BIT(reply->ports[i].port_number);
2876 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2884 /* Prune any ports that are currently a part of mstb in our in-memory
2885 * topology, but were not seen in this link address. Usually this
2886 * means that they were removed while the topology was out of sync,
2887 * e.g. during suspend/resume
2889 mutex_lock(&mgr->lock);
2890 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2891 if (port_mask & BIT(port->port_num))
2894 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2896 list_del(&port->next);
2897 drm_dp_mst_topology_put_port(port);
2900 mutex_unlock(&mgr->lock);
2904 mstb->link_address_sent = false;
2906 return ret < 0 ? ret : changed;
2910 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2911 struct drm_dp_mst_branch *mstb,
2912 struct drm_dp_mst_port *port)
2914 struct drm_dp_enum_path_resources_ack_reply *path_res;
2915 struct drm_dp_sideband_msg_tx *txmsg;
2919 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2924 len = build_enum_path_resources(txmsg, port->port_num);
2926 drm_dp_queue_down_tx(mgr, txmsg);
2928 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2930 path_res = &txmsg->reply.u.path_resources;
2932 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2933 DRM_DEBUG_KMS("enum path resources nak received\n");
2935 if (port->port_num != path_res->port_number)
2936 DRM_ERROR("got incorrect port in response\n");
2938 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2939 path_res->port_number,
2940 path_res->full_payload_bw_number,
2941 path_res->avail_payload_bw_number);
2942 port->available_pbn =
2943 path_res->avail_payload_bw_number;
2951 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2953 if (!mstb->port_parent)
2956 if (mstb->port_parent->mstb != mstb)
2957 return mstb->port_parent;
2959 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2963 * Searches upwards in the topology starting from mstb to try to find the
2964 * closest available parent of mstb that's still connected to the rest of the
2965 * topology. This can be used in order to perform operations like releasing
2966 * payloads, where the branch device which owned the payload may no longer be
2967 * around and thus would require that the payload on the last living relative be pulled instead.
2970 static struct drm_dp_mst_branch *
2971 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2972 struct drm_dp_mst_branch *mstb,
2975 struct drm_dp_mst_branch *rmstb = NULL;
2976 struct drm_dp_mst_port *found_port;
2978 mutex_lock(&mgr->lock);
2979 if (!mgr->mst_primary)
2983 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2987 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2988 rmstb = found_port->parent;
2989 *port_num = found_port->port_num;
2991 /* Search again, starting from this parent */
2992 mstb = found_port->parent;
2996 mutex_unlock(&mgr->lock);
3000 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3001 struct drm_dp_mst_port *port,
3005 struct drm_dp_sideband_msg_tx *txmsg;
3006 struct drm_dp_mst_branch *mstb;
3007 int len, ret, port_num;
3008 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3011 port_num = port->port_num;
3012 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3014 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3022 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3028 for (i = 0; i < port->num_sdp_streams; i++)
3032 len = build_allocate_payload(txmsg, port_num,
3034 pbn, port->num_sdp_streams, sinks);
3036 drm_dp_queue_down_tx(mgr, txmsg);
3039 * FIXME: there is a small chance that between getting the last
3040 * connected mstb and sending the payload message, the last connected
3041 * mstb could also be removed from the topology. In the future, this
3042 * needs to be fixed by restarting the
3043 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3044 * timeout if the topology is still connected to the system.
3046 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3048 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3055 drm_dp_mst_topology_put_mstb(mstb);
3059 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3060 struct drm_dp_mst_port *port, bool power_up)
3062 struct drm_dp_sideband_msg_tx *txmsg;
3065 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3069 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3071 drm_dp_mst_topology_put_port(port);
3075 txmsg->dst = port->parent;
3076 len = build_power_updown_phy(txmsg, port->port_num, power_up);
3077 drm_dp_queue_down_tx(mgr, txmsg);
3079 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3081 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3087 drm_dp_mst_topology_put_port(port);
3091 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
3093 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3095 struct drm_dp_payload *payload)
3099 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3101 payload->payload_state = 0;
3104 payload->payload_state = DP_PAYLOAD_LOCAL;
3108 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3109 struct drm_dp_mst_port *port,
3111 struct drm_dp_payload *payload)
3114 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3117 payload->payload_state = DP_PAYLOAD_REMOTE;
3121 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3122 struct drm_dp_mst_port *port,
3124 struct drm_dp_payload *payload)
3126 DRM_DEBUG_KMS("\n");
3127 /* it's okay for these to fail */
3129 drm_dp_payload_send_msg(mgr, port, id, 0);
3132 drm_dp_dpcd_write_payload(mgr, id, payload);
3133 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3137 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3139 struct drm_dp_payload *payload)
3141 payload->payload_state = 0;
3146 * drm_dp_update_payload_part1() - Execute payload update part 1
3147 * @mgr: manager to use.
3149 * This iterates over all proposed virtual channels, and tries to
3150 * allocate space in the link for them. For 0->slots transitions,
3151 * this step just writes the VCPI to the MST device. For slots->0
3152 * transitions, this writes the updated VCPIs and removes the
3153 * remote VC payloads.
3155 * After calling this, the driver should generate ACT and payload packets.
3158 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3160 struct drm_dp_payload req_payload;
3161 struct drm_dp_mst_port *port;
3165 mutex_lock(&mgr->payload_lock);
3166 for (i = 0; i < mgr->max_payloads; i++) {
3167 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3168 struct drm_dp_payload *payload = &mgr->payloads[i];
3169 bool put_port = false;
3171 /* solve the current payloads - compare to the hw ones
3172  * - update the hw view */
3173 req_payload.start_slot = cur_slots;
3175 port = container_of(vcpi, struct drm_dp_mst_port,
3178 /* Validated ports don't matter if we're releasing VCPI */
3181 if (vcpi->num_slots) {
3182 port = drm_dp_mst_topology_get_port_validated(
3185 mutex_unlock(&mgr->payload_lock);
3191 req_payload.num_slots = vcpi->num_slots;
3192 req_payload.vcpi = vcpi->vcpi;
3195 req_payload.num_slots = 0;
3198 payload->start_slot = req_payload.start_slot;
3199 /* work out what is required to happen with this payload */
3200 if (payload->num_slots != req_payload.num_slots) {
3202 /* need to push an update for this payload */
3203 if (req_payload.num_slots) {
3204 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3206 payload->num_slots = req_payload.num_slots;
3207 payload->vcpi = req_payload.vcpi;
3209 } else if (payload->num_slots) {
3210 payload->num_slots = 0;
3211 drm_dp_destroy_payload_step1(mgr, port,
3214 req_payload.payload_state =
3215 payload->payload_state;
3216 payload->start_slot = 0;
3218 payload->payload_state = req_payload.payload_state;
3220 cur_slots += req_payload.num_slots;
3223 drm_dp_mst_topology_put_port(port);
3226 for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3227 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3232 DRM_DEBUG_KMS("removing payload %d\n", i);
3233 for (j = i; j < mgr->max_payloads - 1; j++) {
3234 mgr->payloads[j] = mgr->payloads[j + 1];
3235 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3237 if (mgr->proposed_vcpis[j] &&
3238 mgr->proposed_vcpis[j]->num_slots) {
3239 set_bit(j + 1, &mgr->payload_mask);
3241 clear_bit(j + 1, &mgr->payload_mask);
3245 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3246 sizeof(struct drm_dp_payload));
3247 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3248 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3250 mutex_unlock(&mgr->payload_lock);
3254 EXPORT_SYMBOL(drm_dp_update_payload_part1);
3257 * drm_dp_update_payload_part2() - Execute payload update part 2
3258 * @mgr: manager to use.
3260 * This iterates over all proposed virtual channels, and tries to
3261 * allocate space in the link for them. For 0->slots transitions,
3262 * this step writes the remote VC payload commands. For slots->0
3263 * this just resets some internal state.
3265 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3267 struct drm_dp_mst_port *port;
3270 mutex_lock(&mgr->payload_lock);
3271 for (i = 0; i < mgr->max_payloads; i++) {
3273 if (!mgr->proposed_vcpis[i])
3276 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3278 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3279 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3280 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3281 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3282 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3285 mutex_unlock(&mgr->payload_lock);
3289 mutex_unlock(&mgr->payload_lock);
3292 EXPORT_SYMBOL(drm_dp_update_payload_part2);
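/*
 * Commit-time sketch (hypothetical driver ordering; details vary per
 * driver): part 1 is written before the stream is enabled, then the ACT
 * trigger is sent and polled for, then part 2 sends the remote
 * ALLOCATE_PAYLOAD messages:
 *
 *	drm_dp_update_payload_part1(mgr);
 *	mydrv_enable_stream_and_send_act(dp);	// driver-specific, made up
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */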
3294 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3295 struct drm_dp_mst_port *port,
3296 int offset, int size, u8 *bytes)
3300 struct drm_dp_sideband_msg_tx *txmsg;
3301 struct drm_dp_mst_branch *mstb;
3303 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3307 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3313 len = build_dpcd_read(txmsg, port->port_num, offset, size);
3314 txmsg->dst = port->parent;
3316 drm_dp_queue_down_tx(mgr, txmsg);
3318 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3322 /* DPCD read should never be NACKed */
3323 if (txmsg->reply.reply_type == 1) {
3324 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3325 mstb, port->port_num, offset, size);
3330 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3335 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3337 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3342 drm_dp_mst_topology_put_mstb(mstb);
3347 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3348 struct drm_dp_mst_port *port,
3349 int offset, int size, u8 *bytes)
3353 struct drm_dp_sideband_msg_tx *txmsg;
3354 struct drm_dp_mst_branch *mstb;
3356 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3360 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3366 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3369 drm_dp_queue_down_tx(mgr, txmsg);
3371 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3373 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3380 drm_dp_mst_topology_put_mstb(mstb);
3384 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3386 struct drm_dp_sideband_msg_reply_body reply;
3388 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3389 reply.req_type = req_type;
3390 drm_dp_encode_sideband_reply(&reply, msg);
3394 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3395 struct drm_dp_mst_branch *mstb,
3396 int req_type, int seqno, bool broadcast)
3398 struct drm_dp_sideband_msg_tx *txmsg;
3400 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3405 txmsg->seqno = seqno;
3406 drm_dp_encode_up_ack_reply(txmsg, req_type);
3408 mutex_lock(&mgr->qlock);
3410 process_single_up_tx_qlock(mgr, txmsg);
3412 mutex_unlock(&mgr->qlock);
3418 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3420 if (dp_link_bw == 0 || dp_link_count == 0)
3421 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3422 dp_link_bw, dp_link_count);
3424 return dp_link_bw * dp_link_count / 2;
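/*
 * Worked example (computed from the formula above): HBR2 (DPCD link-bw value
 * 20, i.e. 5.4 Gbps per lane) across 4 lanes gives 20 * 4 / 2 = 40 PBN per
 * time slot, so the 63 usable slots carry at most 63 * 40 = 2520 PBN.
 */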
3428 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3429 * @mgr: manager to set state for
3430 * @mst_state: true to enable MST on this connector - false to disable.
3432 * This is called by the driver when it detects an MST capable device plugged
3433 * into a DP MST capable port, or when a DP MST capable device is unplugged.
3435 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3438 struct drm_dp_mst_branch *mstb = NULL;
3440 mutex_lock(&mgr->lock);
3441 if (mst_state == mgr->mst_state)
3444 mgr->mst_state = mst_state;
3445 /* set the device into MST mode */
3447 WARN_ON(mgr->mst_primary);
3450 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3451 if (ret != DP_RECEIVER_CAP_SIZE) {
3452 DRM_DEBUG_KMS("failed to read DPCD\n");
3456 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3457 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3458 if (mgr->pbn_div == 0) {
3463 /* add initial branch device at LCT 1 */
3464 mstb = drm_dp_add_mst_branch_device(1, NULL);
3471 /* give this the main reference */
3472 mgr->mst_primary = mstb;
3473 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3475 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3476 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3482 struct drm_dp_payload reset_pay;
3483 reset_pay.start_slot = 0;
3484 reset_pay.num_slots = 0x3f;
3485 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3488 queue_work(system_long_wq, &mgr->work);
3492 /* disable MST on the device */
3493 mstb = mgr->mst_primary;
3494 mgr->mst_primary = NULL;
3495 /* this can fail if the device is gone */
3496 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3498 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3499 mgr->payload_mask = 0;
3500 set_bit(0, &mgr->payload_mask);
3505 mutex_unlock(&mgr->lock);
3507 drm_dp_mst_topology_put_mstb(mstb);
3511 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
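/*
 * Usage sketch (hypothetical long-pulse/detect path; dp->mst_mgr is a made-up
 * field name): enable the manager when an MST-capable sink is found, disable
 * it again on unplug.
 *
 *	if (mydrv_sink_is_mst_capable(dp))
 *		ret = drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, true);
 *	else
 *		ret = drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
 */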
3514 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3516 struct drm_dp_mst_port *port;
3518 /* The link address will need to be re-sent on resume */
3519 mstb->link_address_sent = false;
3521 list_for_each_entry(port, &mstb->ports, next) {
3522 /* The PBN for each port will also need to be re-probed */
3523 port->available_pbn = 0;
3526 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3531 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3532 * @mgr: manager to suspend
3534 * This function tells the MST device that we can't handle UP messages
3535 * anymore. This should stop it from sending any since we are suspended.
3537 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3539 mutex_lock(&mgr->lock);
3540 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3541 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3542 mutex_unlock(&mgr->lock);
3543 flush_work(&mgr->up_req_work);
3544 flush_work(&mgr->work);
3545 flush_work(&mgr->delayed_destroy_work);
3547 mutex_lock(&mgr->lock);
3548 if (mgr->mst_state && mgr->mst_primary)
3549 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3550 mutex_unlock(&mgr->lock);
3552 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3555 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3556 * @mgr: manager to resume
3557 * @sync: whether or not to perform topology reprobing synchronously
3559 * This will fetch DPCD and see if the device is still there,
3560 * if it is, it will rewrite the MSTM control bits, and return.
3562 * If the device fails this returns -1, and the driver should do
3563 * a full MST reprobe, in case we were undocked.
3565 * During system resume (where it is assumed that the driver will be calling
3566 * drm_atomic_helper_resume()) this function should be called beforehand with
3567 * @sync set to true. In contexts like runtime resume where the driver is not
3568 * expected to be calling drm_atomic_helper_resume(), this function should be
3569 * called with @sync set to false in order to avoid deadlocking.
3571 * Returns: -1 if the MST topology was removed while we were suspended, 0 otherwise.
3574 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3580 mutex_lock(&mgr->lock);
3581 if (!mgr->mst_primary)
3584 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3585 DP_RECEIVER_CAP_SIZE);
3586 if (ret != DP_RECEIVER_CAP_SIZE) {
3587 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3591 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3594 DP_UPSTREAM_IS_SRC);
3596 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3600 /* Some hubs forget their guids after they resume */
3601 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3603 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3606 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3609 * For the final step of resuming the topology, we need to bring the
3610 * state of our in-memory topology back into sync with reality. So,
3611 * restart the probing process as if we're probing a new hub
3613 queue_work(system_long_wq, &mgr->work);
3614 mutex_unlock(&mgr->lock);
3617 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3618 flush_work(&mgr->work);
3624 mutex_unlock(&mgr->lock);
3627 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
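/*
 * Suspend/resume sketch (hypothetical PM hooks, assuming the driver calls
 * drm_atomic_helper_resume() afterwards on system resume):
 *
 *	// system suspend
 *	drm_dp_mst_topology_mgr_suspend(&dp->mst_mgr);
 *
 *	// system resume, before drm_atomic_helper_resume()
 *	if (drm_dp_mst_topology_mgr_resume(&dp->mst_mgr, true) < 0)
 *		mydrv_full_mst_reprobe(dp);	// hypothetical fallback
 */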
3629 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3633 int replylen, origlen, curreply;
3635 struct drm_dp_sideband_msg_rx *msg;
3636 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3637 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3639 len = min(mgr->max_dpcd_transaction_bytes, 16);
3640 ret = drm_dp_dpcd_read(mgr->aux, basereg,
3643 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3646 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3648 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3651 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3656 while (replylen > 0) {
3657 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3658 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3661 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3666 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3668 DRM_DEBUG_KMS("failed to build sideband msg\n");
3678 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3680 struct drm_dp_sideband_msg_tx *txmsg;
3681 struct drm_dp_mst_branch *mstb;
3682 struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3685 if (!drm_dp_get_one_sb_msg(mgr, false))
3686 goto clear_down_rep_recv;
3688 if (!mgr->down_rep_recv.have_eomt)
3691 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3693 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3695 goto clear_down_rep_recv;
3698 /* find the message */
3700 mutex_lock(&mgr->qlock);
3701 txmsg = mstb->tx_slots[slot];
3702 /* remove from slots */
3703 mutex_unlock(&mgr->qlock);
3706 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3707 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3708 mgr->down_rep_recv.msg[0]);
3712 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3714 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3715 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3716 txmsg->reply.req_type,
3717 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3718 txmsg->reply.u.nak.reason,
3719 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3720 txmsg->reply.u.nak.nak_data);
3722 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3723 drm_dp_mst_topology_put_mstb(mstb);
3725 mutex_lock(&mgr->qlock);
3726 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3727 mstb->tx_slots[slot] = NULL;
3728 mgr->is_waiting_for_dwn_reply = false;
3729 mutex_unlock(&mgr->qlock);
3731 wake_up_all(&mgr->tx_waitq);
3736 drm_dp_mst_topology_put_mstb(mstb);
3737 clear_down_rep_recv:
3738 mutex_lock(&mgr->qlock);
3739 mgr->is_waiting_for_dwn_reply = false;
3740 mutex_unlock(&mgr->qlock);
3741 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3747 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3748 struct drm_dp_pending_up_req *up_req)
3750 struct drm_dp_mst_branch *mstb = NULL;
3751 struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3752 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3753 bool hotplug = false;
3755 if (hdr->broadcast) {
3756 const u8 *guid = NULL;
3758 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3759 guid = msg->u.conn_stat.guid;
3760 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3761 guid = msg->u.resource_stat.guid;
3764 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3766 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3770 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3775 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3776 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3777 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3781 drm_dp_mst_topology_put_mstb(mstb);
3785 static void drm_dp_mst_up_req_work(struct work_struct *work)
3787 struct drm_dp_mst_topology_mgr *mgr =
3788 container_of(work, struct drm_dp_mst_topology_mgr,
3790 struct drm_dp_pending_up_req *up_req;
3791 bool send_hotplug = false;
3793 mutex_lock(&mgr->probe_lock);
3795 mutex_lock(&mgr->up_req_lock);
3796 up_req = list_first_entry_or_null(&mgr->up_req_list,
3797 struct drm_dp_pending_up_req,
3800 list_del(&up_req->next);
3801 mutex_unlock(&mgr->up_req_lock);
3806 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3809 mutex_unlock(&mgr->probe_lock);
3812 drm_kms_helper_hotplug_event(mgr->dev);
3815 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3817 struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3818 struct drm_dp_pending_up_req *up_req;
3821 if (!drm_dp_get_one_sb_msg(mgr, true))
3824 if (!mgr->up_req_recv.have_eomt)
3827 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3829 DRM_ERROR("Not enough memory to process MST up req\n");
3832 INIT_LIST_HEAD(&up_req->next);
3835 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3837 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3838 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3839 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3840 up_req->msg.req_type);
3845 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3848 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3849 const struct drm_dp_connection_status_notify *conn_stat =
3850 &up_req->msg.u.conn_stat;
3852 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3853 conn_stat->port_number,
3854 conn_stat->legacy_device_plug_status,
3855 conn_stat->displayport_device_plug_status,
3856 conn_stat->message_capability_status,
3857 conn_stat->input_port,
3858 conn_stat->peer_device_type);
3859 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3860 const struct drm_dp_resource_status_notify *res_stat =
3861 &up_req->msg.u.resource_stat;
3863 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3864 res_stat->port_number,
3865 res_stat->available_pbn);
3869 mutex_lock(&mgr->up_req_lock);
3870 list_add_tail(&up_req->next, &mgr->up_req_list);
3871 mutex_unlock(&mgr->up_req_lock);
3872 queue_work(system_long_wq, &mgr->up_req_work);
3875 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3880 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3881 * @mgr: manager to notify irq for.
3882 * @esi: 4 bytes from SINK_COUNT_ESI
3883 * @handled: whether the hpd interrupt was consumed or not
3885 * This should be called from the driver when it detects a short IRQ,
3886 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3887 * topology manager will process the sideband messages received as a result of this.
3890 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3897 if (sc != mgr->sink_count) {
3898 mgr->sink_count = sc;
3902 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3903 ret = drm_dp_mst_handle_down_rep(mgr);
3907 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3908 ret |= drm_dp_mst_handle_up_req(mgr);
3912 drm_dp_mst_kick_tx(mgr);
3915 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
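/*
 * IRQ-path sketch (hypothetical short-pulse handler, mirroring what existing
 * drivers do): read the ESI vector, let the topology manager handle it, then
 * ack the serviced bits.
 *
 *	u8 esi[DP_DPRX_ESI_LEN] = {};
 *	bool handled = false;
 *
 *	if (drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT_ESI, esi,
 *			     DP_DPRX_ESI_LEN) == DP_DPRX_ESI_LEN) {
 *		drm_dp_mst_hpd_irq(&dp->mst_mgr, esi, &handled);
 *		if (handled)
 *			drm_dp_dpcd_write(&dp->aux, DP_SINK_COUNT_ESI + 1,
 *					  &esi[1], 3);
 *	}
 */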
3918 * drm_dp_mst_detect_port() - get connection status for an MST port
3919 * @connector: DRM connector for this port
3920 * @ctx: The acquisition context to use for grabbing locks
3921 * @mgr: manager for this port
3922 * @port: pointer to a port
3924 * This returns the current connection state for a port.
3927 drm_dp_mst_detect_port(struct drm_connector *connector,
3928 struct drm_modeset_acquire_ctx *ctx,
3929 struct drm_dp_mst_topology_mgr *mgr,
3930 struct drm_dp_mst_port *port)
3934 /* we need to search for the port in the mgr in case it's gone */
3935 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3937 return connector_status_disconnected;
3939 ret = drm_modeset_lock(&mgr->base.lock, ctx);
3943 ret = connector_status_disconnected;
3948 switch (port->pdt) {
3949 case DP_PEER_DEVICE_NONE:
3950 case DP_PEER_DEVICE_MST_BRANCHING:
3952 ret = connector_status_connected;
3955 case DP_PEER_DEVICE_SST_SINK:
3956 ret = connector_status_connected;
3957 /* for logical ports - cache the EDID */
3958 if (port->port_num >= 8 && !port->cached_edid) {
3959 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3962 case DP_PEER_DEVICE_DP_LEGACY_CONV:
3964 ret = connector_status_connected;
3968 drm_dp_mst_topology_put_port(port);
3971 EXPORT_SYMBOL(drm_dp_mst_detect_port);
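/*
 * Usage sketch (hypothetical &drm_connector_helper_funcs.detect_ctx hook,
 * mydrv_* names are made up):
 *
 *	static int mydrv_mst_detect(struct drm_connector *connector,
 *				    struct drm_modeset_acquire_ctx *ctx,
 *				    bool force)
 *	{
 *		struct mydrv_mst_connector *c = to_mydrv_mst(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
 *	}
 */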
3974 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3975 * @mgr: manager for this port
3976 * @port: unverified pointer to a port.
3978 * This returns whether the port supports audio or not.
3980 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3981 struct drm_dp_mst_port *port)
3985 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3988 ret = port->has_audio;
3989 drm_dp_mst_topology_put_port(port);
3992 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3995 * drm_dp_mst_get_edid() - get EDID for an MST port
3996 * @connector: toplevel connector to get EDID for
3997 * @mgr: manager for this port
3998 * @port: unverified pointer to a port.
4000 * This returns an EDID for the port connected to a connector.
4001 * It validates that the pointer still exists, so the caller doesn't require a reference.
4004 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4006 struct edid *edid = NULL;
4008 /* we need to search for the port in the mgr in case it's gone */
4009 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4013 if (port->cached_edid)
4014 edid = drm_edid_duplicate(port->cached_edid);
4016 edid = drm_get_edid(connector, &port->aux.ddc);
4018 port->has_audio = drm_detect_monitor_audio(edid);
4019 drm_dp_mst_topology_put_port(port);
4022 EXPORT_SYMBOL(drm_dp_mst_get_edid);
4025 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4026 * @mgr: manager to use
4027 * @pbn: payload bandwidth to convert into slots.
4029 * Calculate the number of VCPI slots that will be required for the given PBN
4030 * value. This function is deprecated, and should not be used in atomic
4034 * The total slots required for this port, or error.
4036 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4041 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4043 /* max. time slots - one slot for MTP header */
4048 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4050 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4051 struct drm_dp_vcpi *vcpi, int pbn, int slots)
4055 /* max. time slots - one slot for MTP header */
4060 vcpi->aligned_pbn = slots * mgr->pbn_div;
4061 vcpi->num_slots = slots;
4063 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4070 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4071 * @state: global atomic state
4072 * @mgr: MST topology manager for the port
4073 * @port: port to find vcpi slots for
4074 * @pbn: bandwidth required for the mode in PBN
4076 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4077 * may have had. Any atomic drivers which support MST must call this function
4078 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4079 * current VCPI allocation for the new state, but only when
4080 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4081 * to ensure compatibility with userspace applications that still use the
4082 * legacy modesetting UAPI.
4084 * Allocations set by this function are not checked against the bandwidth
4085 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4087 * Additionally, it is OK to call this function multiple times on the same
4088 * @port as needed. It is not OK however, to call this function and
4089 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4092 * drm_dp_atomic_release_vcpi_slots()
4093 * drm_dp_mst_atomic_check()
4096 * Total slots in the atomic state assigned for this port, or a negative error
4097 * code if the port no longer exists
4099 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4100 struct drm_dp_mst_topology_mgr *mgr,
4101 struct drm_dp_mst_port *port, int pbn)
4103 struct drm_dp_mst_topology_state *topology_state;
4104 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4105 int prev_slots, req_slots;
4107 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4108 if (IS_ERR(topology_state))
4109 return PTR_ERR(topology_state);
4111 /* Find the current allocation for this port, if any */
4112 list_for_each_entry(pos, &topology_state->vcpis, next) {
4113 if (pos->port == port) {
4115 prev_slots = vcpi->vcpi;
4118 * This should never happen, unless the driver tries
4119 * releasing and allocating the same VCPI allocation, which is an error.
4122 if (WARN_ON(!prev_slots)) {
4123 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4134 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4136 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4137 port->connector->base.id, port->connector->name,
4138 port, prev_slots, req_slots);
4140 /* Add the new allocation to the state */
4142 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4146 drm_dp_mst_get_port_malloc(port);
4148 list_add(&vcpi->next, &topology_state->vcpis);
4150 vcpi->vcpi = req_slots;
4154 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
4157 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4158 * @state: global atomic state
4159 * @mgr: MST topology manager for the port
4160 * @port: The port to release the VCPI slots from
4162 * Releases any VCPI slots that have been allocated to a port in the atomic
4163 * state. Any atomic drivers which support MST must call this function in
4164 * their &drm_connector_helper_funcs.atomic_check() callback when the
4165 * connector will no longer have VCPI allocated (e.g. because its CRTC was
4166 * removed) when it had VCPI allocated in the previous atomic state.
4168 * It is OK to call this even if @port has been removed from the system.
4169 * Additionally, it is OK to call this function multiple times on the same
4170 * @port as needed. It is not OK however, to call this function and
4171 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4175 * drm_dp_atomic_find_vcpi_slots()
4176 * drm_dp_mst_atomic_check()
4179 * 0 if all slots for this port were added back to
4180 * &drm_dp_mst_topology_state.avail_slots or negative error code
4182 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4183 struct drm_dp_mst_topology_mgr *mgr,
4184 struct drm_dp_mst_port *port)
4186 struct drm_dp_mst_topology_state *topology_state;
4187 struct drm_dp_vcpi_allocation *pos;
4190 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4191 if (IS_ERR(topology_state))
4192 return PTR_ERR(topology_state);
4194 list_for_each_entry(pos, &topology_state->vcpis, next) {
4195 if (pos->port == port) {
4200 if (WARN_ON(!found)) {
4201 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4202 port, &topology_state->base);
4206 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4208 drm_dp_mst_put_port_malloc(port);
4214 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
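/*
 * Atomic sketch (hypothetical check-phase logic; variable names are made
 * up): allocate slots while the mode or connectors change, release them when
 * the connector loses its CRTC. Never do both for one port in the same
 * state.
 *
 *	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
 *		pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *		slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
 *		if (slots < 0)
 *			return slots;
 *	}
 *
 *	// and on the release side, when the CRTC is being removed:
 *	ret = drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 */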
4217 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4218 * @mgr: manager for this port
4219 * @port: port to allocate a virtual channel for.
4220 * @pbn: payload bandwidth number to request
4221 * @slots: returned number of slots for this PBN.
4223 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4224 struct drm_dp_mst_port *port, int pbn, int slots)
4228 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4235 if (port->vcpi.vcpi > 0) {
4236 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4237 port->vcpi.vcpi, port->vcpi.pbn, pbn);
4238 if (pbn == port->vcpi.pbn) {
4239 drm_dp_mst_topology_put_port(port);
4244 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4246 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4247 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4250 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4251 pbn, port->vcpi.num_slots);
4253 /* Keep port allocated until its payload has been removed */
4254 drm_dp_mst_get_port_malloc(port);
4255 drm_dp_mst_topology_put_port(port);
4260 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
4262 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4265 port = drm_dp_mst_topology_get_port_validated(mgr, port);
4269 slots = port->vcpi.num_slots;
4270 drm_dp_mst_topology_put_port(port);
4273 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4276 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4277 * @mgr: manager for this port
4278 * @port: unverified pointer to a port.
4280 * This just resets the number of slots for the port's VCPI for later programming.
4282 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4285 * A port with VCPI will remain allocated until its VCPI is
4286 * released, no verified ref needed
4289 port->vcpi.num_slots = 0;
4291 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4294 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4295 * @mgr: manager for this port
4296 * @port: port to deallocate vcpi for
4298 * This can be called unconditionally, regardless of whether
4299 * drm_dp_mst_allocate_vcpi() succeeded or not.
4301 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4302 struct drm_dp_mst_port *port)
4304 if (!port->vcpi.vcpi)
4307 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4308 port->vcpi.num_slots = 0;
4310 port->vcpi.aligned_pbn = 0;
4311 port->vcpi.vcpi = 0;
4312 drm_dp_mst_put_port_malloc(port);
4314 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
4316 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4317 int id, struct drm_dp_payload *payload)
4319 u8 payload_alloc[3], status;
4323 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4324 DP_PAYLOAD_TABLE_UPDATED);
4326 payload_alloc[0] = id;
4327 payload_alloc[1] = payload->start_slot;
4328 payload_alloc[2] = payload->num_slots;
4330 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4332 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4337 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4339 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4343 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4346 usleep_range(10000, 20000);
4349 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4360 * drm_dp_check_act_status() - Check ACT handled status.
4361 * @mgr: manager to use
4363 * Check the payload status bits in the DPCD for ACT handled completion.
4365 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4372 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4375 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4379 if (status & DP_PAYLOAD_ACT_HANDLED)
4384 } while (count < 30);
4386 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
4387 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
4395 EXPORT_SYMBOL(drm_dp_check_act_status);
4398 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4399 * @clock: dot clock for the mode
4400 * @bpp: bpp for the mode.
4402 * This uses the formula in the spec to calculate the PBN value for a mode.
4404 int drm_dp_calc_pbn_mode(int clock, int bpp)
4407 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4408 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
4409 * common multiplier to render an integer PBN for all link rate/lane
4410 * counts combinations
4412 * peak_kbps *= (1006/1000)
4413 * peak_kbps *= (64/54)
4414 * peak_kbps *= 8 convert to bytes
4416 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4417 8 * 54 * 1000 * 1000);
4419 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
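/*
 * Worked example (computed from the formula above): 1920x1080@60 with a
 * 148500 kHz dot clock at 24 bpp gives
 * DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 532 PBN.
 */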
4421 /* we want to kick the TX after we've acked the up/down IRQs. */
4422 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4424 queue_work(system_long_wq, &mgr->tx_work);
4427 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4428 struct drm_dp_mst_branch *mstb)
4430 struct drm_dp_mst_port *port;
4431 int tabs = mstb->lct;
4435 for (i = 0; i < tabs; i++)
4439 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4440 list_for_each_entry(port, &mstb->ports, next) {
4441 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
4443 drm_dp_mst_dump_mstb(m, port->mstb);
4447 #define DP_PAYLOAD_TABLE_SIZE 64
4449 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4454 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4455 if (drm_dp_dpcd_read(mgr->aux,
4456 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4463 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4464 struct drm_dp_mst_port *port, char *name,
4467 struct edid *mst_edid;
4469 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4470 drm_edid_get_monitor_name(mst_edid, name, namelen);
/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump the MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);

	/* dump VCPIs */
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
		   mgr->max_payloads);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			char name[14];

			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			fetch_monitor_name(mgr, port, name, sizeof(name));
			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
				   port->port_num, port->vcpi.vcpi,
				   port->vcpi.num_slots,
				   (*name != 0) ? name : "Unknown");
		} else {
			seq_printf(m, "vcpi %d: unused\n", i);
		}
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[DP_PAYLOAD_TABLE_SIZE];
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

		/* dump the standard OUI branch header */
		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
		for (i = 0x3; i < 0x8 && buf[i]; i++)
			seq_printf(m, "%c", buf[i]);
		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
		if (dump_dp_payload_table(mgr, buf))
			seq_printf(m, "payload table: %*ph\n",
				   DP_PAYLOAD_TABLE_SIZE, buf);
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
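
/*
 * Hookup sketch (hypothetical "foo" driver code, not part of this file):
 * drivers usually expose this dump through a debugfs file built on the
 * seq_file single-open helpers.
 *
 *	static int foo_dp_mst_info_show(struct seq_file *m, void *unused)
 *	{
 *		struct foo_device *foo = m->private;
 *
 *		drm_dp_mst_dump_topology(m, &foo->mst_mgr);
 *		return 0;
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(foo_dp_mst_info);
 */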
static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
	if (port->connector)
		port->mgr->cbs->destroy_connector(port->mgr, port->connector);

	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
	drm_dp_mst_put_port_malloc(port);
}
static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slot msgs */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}
static void drm_dp_delayed_destroy_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr =
		container_of(work, struct drm_dp_mst_topology_mgr,
			     delayed_destroy_work);
	bool send_hotplug = false, go_again;

	/*
	 * Not a regular list traverse as we have to drop the destroy
	 * connector lock before destroying the mstb/port, to avoid AB->BA
	 * ordering between this lock and the config mutex.
	 */
	do {
		go_again = false;

		for (;;) {
			struct drm_dp_mst_branch *mstb;

			mutex_lock(&mgr->delayed_destroy_lock);
			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
							struct drm_dp_mst_branch,
							destroy_next);
			if (mstb)
				list_del(&mstb->destroy_next);
			mutex_unlock(&mgr->delayed_destroy_lock);

			if (!mstb)
				break;

			drm_dp_delayed_destroy_mstb(mstb);
			go_again = true;
		}

		for (;;) {
			struct drm_dp_mst_port *port;

			mutex_lock(&mgr->delayed_destroy_lock);
			port = list_first_entry_or_null(&mgr->destroy_port_list,
							struct drm_dp_mst_port,
							next);
			if (port)
				list_del(&port->next);
			mutex_unlock(&mgr->delayed_destroy_lock);

			if (!port)
				break;

			drm_dp_delayed_destroy_port(port);
			send_hotplug = true;
			go_again = true;
		}
	} while (go_again);

	if (send_hotplug)
		drm_kms_helper_hotplug_event(mgr->dev);
}
static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
	struct drm_dp_mst_topology_state *state, *old_state =
		to_dp_mst_topology_state(obj->state);
	struct drm_dp_vcpi_allocation *pos, *vcpi;

	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	INIT_LIST_HEAD(&state->vcpis);

	list_for_each_entry(pos, &old_state->vcpis, next) {
		/* Prune leftover freed VCPI allocations */
		if (!pos->vcpi)
			continue;

		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
		if (!vcpi)
			goto fail;

		drm_dp_mst_get_port_malloc(vcpi->port);
		list_add(&vcpi->next, &state->vcpis);
	}

	return &state->base;

fail:
	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
		drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}
	kfree(state);

	return NULL;
}
static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
				     struct drm_private_state *state)
{
	struct drm_dp_mst_topology_state *mst_state =
		to_dp_mst_topology_state(state);
	struct drm_dp_vcpi_allocation *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
		/* We only keep references to ports with non-zero VCPIs */
		if (pos->vcpi)
			drm_dp_mst_put_port_malloc(pos->port);
		kfree(pos);
	}

	kfree(mst_state);
}
static inline int
drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_topology_state *mst_state)
{
	struct drm_dp_vcpi_allocation *vcpi;
	int avail_slots = 63, payload_count = 0;

	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
		/* Releasing VCPI is always OK, even if the port is gone */
		if (!vcpi->vcpi) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
					 vcpi->port);
			continue;
		}

		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
				 vcpi->port, vcpi->vcpi);

		avail_slots -= vcpi->vcpi;
		if (avail_slots < 0) {
			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
					 vcpi->port, mst_state,
					 avail_slots + vcpi->vcpi);
			return -ENOSPC;
		}

		if (++payload_count > mgr->max_payloads) {
			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
					 mgr, mst_state, mgr->max_payloads);
			return -EINVAL;
		}
	}
	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
			 mgr, mst_state, avail_slots,
			 63 - avail_slots);

	return 0;
}
/**
 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
 * atomic update is valid
 * @state: Pointer to the new &struct drm_dp_mst_topology_state
 *
 * Checks the given topology state for an atomic update to ensure that it's
 * valid. This includes checking whether there's enough bandwidth to support
 * the new VCPI allocations in the atomic update.
 *
 * Any atomic drivers supporting DP MST must make sure to call this after
 * checking the rest of their state in their
 * &drm_mode_config_funcs.atomic_check() callback.
 *
 * See also:
 * drm_dp_atomic_find_vcpi_slots()
 * drm_dp_atomic_release_vcpi_slots()
 *
 * Returns:
 *
 * 0 if the new state is valid, negative error code otherwise.
 */
int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int i, ret = 0;

	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
		if (ret)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_atomic_check);
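
/*
 * Usage sketch (hypothetical "foo" driver code): per the kerneldoc above,
 * this is called at the tail of the driver's
 * &drm_mode_config_funcs.atomic_check implementation, once every VCPI
 * allocation for the update has been requested.
 *
 *	static int foo_atomic_check(struct drm_device *dev,
 *				    struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */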
const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
	.atomic_destroy_state = drm_dp_mst_destroy_state,
};
EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
/**
 * drm_atomic_get_mst_topology_state: get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
 * state vtable so that the private object state returned is that of an MST
 * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
 * to take care of the locking, so we warn if the connection_mutex is not held.
 *
 * RETURNS:
 *
 * The MST topology state or error pointer.
 */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->delayed_destroy_lock);
	mutex_init(&mgr->up_req_lock);
	mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_init(&mgr->topology_ref_history_lock);
#endif
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_port_list);
	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
	INIT_LIST_HEAD(&mgr->up_req_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		return -ENOMEM;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
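
/*
 * Usage sketch (hypothetical "foo" driver code): the manager is typically
 * initialised once at connector setup time. The DPCD transaction limit (16)
 * and payload count (4) below are illustrative values, not recommendations.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&foo->mst_mgr, dev, &foo->aux,
 *					   16, 4, foo->connector.base.id);
 *	if (ret)
 *		return ret;
 */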
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	cancel_work_sync(&mgr->delayed_destroy_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);

	mutex_destroy(&mgr->delayed_destroy_lock);
	mutex_destroy(&mgr->payload_lock);
	mutex_destroy(&mgr->qlock);
	mutex_destroy(&mgr->lock);
	mutex_destroy(&mgr->up_req_lock);
	mutex_destroy(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	mutex_destroy(&mgr->topology_ref_history_lock);
#endif
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}
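
/*
 * Shape of an accepted transfer (illustrative): up to
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS writes followed by exactly one read
 * of at most 255 bytes, which maps onto a single DP_REMOTE_I2C_READ
 * sideband transaction. The classic EDID block fetch fits this pattern:
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
 *	};
 */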
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes,
		       msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}