/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
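
/*
 * An up request (e.g. CONNECTION_STATUS_NOTIFY) received from a branch
 * device, kept on a list so it can be processed outside of the sideband
 * receive path.
 */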
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			/* mask off the low nibble */
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
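/*
 * The two helpers below implement the bit-serial CRCs used by sideband
 * messages: a CRC-4 over the header nibbles (XOR constant 0x13, i.e.
 * polynomial x^4 + x + 1 with the implicit top bit) and a CRC-8 over the
 * message body (XOR constant 0xd5, i.e. x^8 + x^7 + x^6 + x^4 + x^2 + 1).
 * Both consume the input MSB-first and then flush the remainder with four
 * or eight zero bits respectively.
 */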
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
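
/*
 * A sideband msg header is 3 fixed bytes (LCT/LCR, broadcast/path/msg_len,
 * and SOMT/EOMT/seqno + CRC nibble), plus one RAD byte for every two hops
 * in the link count total.
 */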
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

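	/* The CRC covers every header nibble except the CRC nibble itself */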
	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				int j;

				/*
				 * Only free the transaction buffers that were
				 * actually duplicated before the failure.
				 */
				for (j = 0; j < i; j++)
					kfree(r->transactions[j].bytes);
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/*
 * Add a chunk of an incoming sideband msg to the builder. When @hdr is true,
 * @replybuf begins with a sideband msg header (the first chunk of a
 * transaction); otherwise it is body-only continuation data.
 */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;

		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC; the last byte of the chunk is the CRC itself */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1]) {
			DRM_DEBUG_KMS("sideband msg data crc4 mismatch 0x%x 0x%x\n",
				      crc4, msg->chunk[msg->curchunk_len - 1]);
			return false;
		}
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

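/*
 * Parse a LINK_ADDRESS reply: the branch GUID and port count, followed by a
 * per-port block (peer device type, plug status, and for output ports the
 * DPCD revision, peer GUID and SDP stream counts).
 */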
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	/* make sure the advertised read data actually fits in the reply */
	if (idx + repmsg->u.remote_dpcd_read_ack.num_bytes > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* make sure the advertised read data actually fits in the reply */
	if (idx + repmsg->u.remote_i2c_read_ack.num_bytes > raw->curlen)
		goto fail_len;
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

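/*
 * Parse the common reply header (ACK/NAK bit plus request type) and dispatch
 * to the matching per-request parser. NAK replies carry the NAKing device's
 * GUID, a reason code and one byte of NAK data instead of a reply body.
 */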
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
			    u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

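/*
 * Payload IDs are expected to start at 1 (payload ID 0 is reserved at mgr
 * init time elsewhere in this file), hence the proposed_vcpis[ret - 1]
 * indexing below.
 */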
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;

	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

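/*
 * Wait up to four seconds for a reply to @txmsg. On timeout the message is
 * unlinked from the tx queue (and its tx slot released) under &mgr->qlock,
 * so the tx work can no longer touch it.
 */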
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n",
			      txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
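
/*
 * Illustrative sketch (hypothetical driver code, not taken from any real
 * driver): given the scheme described above, a driver would typically pin a
 * port's memory for the lifetime of the connector built on top of it by
 * taking a malloc reference in its &drm_dp_mst_topology_cbs.add_connector
 * hook, and drop that reference when the connector is destroyed. All
 * example_* names below are made up for illustration:
 *
 *	static struct drm_connector *
 *	example_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *				  struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = example_connector_create(mgr, port, path);
 *		if (!connector)
 *			return NULL;
 *
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 *	static void example_connector_destroy(struct drm_connector *connector)
 *	{
 *		struct example_connector *econ = to_example_connector(connector);
 *
 *		drm_dp_mst_put_port_malloc(econ->port);
 *		kfree(econ);
 *	}
 */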

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
1356
1357 static void drm_dp_free_mst_port(struct kref *kref)
1358 {
1359         struct drm_dp_mst_port *port =
1360                 container_of(kref, struct drm_dp_mst_port, malloc_kref);
1361
1362         drm_dp_mst_put_mstb_malloc(port->parent);
1363         kfree(port);
1364 }
1365
1366 /**
1367  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1368  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1369  *
1370  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1371  * reaches 0, the memory allocation for @port will be released and @port may
1372  * no longer be used.
1373  *
1374  * Because @port could potentially be freed at any time by the DP MST helpers
1375  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1376  * function, drivers that wish to make use of &struct drm_dp_mst_port should
1377  * ensure that they grab at least one malloc reference to their MST ports
1378  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1379  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1380  *
1381  * See also: drm_dp_mst_put_port_malloc()
1382  */
1383 void
1384 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1385 {
1386         kref_get(&port->malloc_kref);
1387         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1388 }
1389 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
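
/*
 * A hypothetical sketch of the pinning described above; the foo_* names are
 * made up for illustration:
 *
 *	static struct drm_connector *
 *	foo_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = foo_create_mst_connector(mgr, port, path);
 *		if (!connector)
 *			return NULL;
 *
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 * with the matching drm_dp_mst_put_port_malloc() in the connector's destroy
 * hook.
 */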
1390
1391 /**
1392  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1393  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1394  *
1395  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1396  * reaches 0, the memory allocation for @port will be released and @port may
1397  * no longer be used.
1398  *
1399  * See also: drm_dp_mst_get_port_malloc()
1400  */
1401 void
1402 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1403 {
1404         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1405         kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1406 }
1407 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1408
1409 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1410
1411 #define STACK_DEPTH 8
1412
1413 static noinline void
1414 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1415                     struct drm_dp_mst_topology_ref_history *history,
1416                     enum drm_dp_mst_topology_ref_type type)
1417 {
1418         struct drm_dp_mst_topology_ref_entry *entry = NULL;
1419         depot_stack_handle_t backtrace;
1420         ulong stack_entries[STACK_DEPTH];
1421         uint n;
1422         int i;
1423
1424         n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1425         backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1426         if (!backtrace)
1427                 return;
1428
1429         /* Try to find an existing entry for this backtrace */
1430         for (i = 0; i < history->len; i++) {
1431                 if (history->entries[i].backtrace == backtrace) {
1432                         entry = &history->entries[i];
1433                         break;
1434                 }
1435         }
1436
1437         /* Otherwise add one */
1438         if (!entry) {
1439                 struct drm_dp_mst_topology_ref_entry *new;
1440                 int new_len = history->len + 1;
1441
1442                 new = krealloc(history->entries, sizeof(*new) * new_len,
1443                                GFP_KERNEL);
1444                 if (!new)
1445                         return;
1446
1447                 entry = &new[history->len];
1448                 history->len = new_len;
1449                 history->entries = new;
1450
1451                 entry->backtrace = backtrace;
1452                 entry->type = type;
1453                 entry->count = 0;
1454         }
1455         entry->count++;
1456         entry->ts_nsec = ktime_get_ns();
1457 }
1458
1459 static int
1460 topology_ref_history_cmp(const void *a, const void *b)
1461 {
1462         const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1463
1464         if (entry_a->ts_nsec > entry_b->ts_nsec)
1465                 return 1;
1466         else if (entry_a->ts_nsec < entry_b->ts_nsec)
1467                 return -1;
1468         else
1469                 return 0;
1470 }
1471
1472 static inline const char *
1473 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1474 {
1475         if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1476                 return "get";
1477         else
1478                 return "put";
1479 }
1480
1481 static void
1482 __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1483                             void *ptr, const char *type_str)
1484 {
1485         struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1486         char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1487         int i;
1488
1489         if (!buf)
1490                 return;
1491
1492         if (!history->len)
1493                 goto out;
1494
1495         /* First, sort the list so that it goes from oldest to newest
1496          * reference entry
1497          */
1498         sort(history->entries, history->len, sizeof(*history->entries),
1499              topology_ref_history_cmp, NULL);
1500
1501         drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1502                    type_str, ptr);
1503
1504         for (i = 0; i < history->len; i++) {
1505                 const struct drm_dp_mst_topology_ref_entry *entry =
1506                         &history->entries[i];
1507                 ulong *entries;
1508                 uint nr_entries;
1509                 u64 ts_nsec = entry->ts_nsec;
1510                 u32 rem_nsec = do_div(ts_nsec, 1000000000);
1511
1512                 nr_entries = stack_depot_fetch(entry->backtrace, &entries);
1513                 stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
1514
1515                 drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
1516                            entry->count,
1517                            topology_ref_type_to_str(entry->type),
1518                            ts_nsec, rem_nsec / 1000, buf);
1519         }
1520
1521         /* Now free the history, since this is the only time we expose it */
1522         kfree(history->entries);
1523 out:
1524         kfree(buf);
1525 }
1526
1527 static __always_inline void
1528 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1529 {
1530         __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1531                                     "MSTB");
1532 }
1533
1534 static __always_inline void
1535 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1536 {
1537         __dump_topology_ref_history(&port->topology_ref_history, port,
1538                                     "Port");
1539 }
1540
1541 static __always_inline void
1542 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1543                        enum drm_dp_mst_topology_ref_type type)
1544 {
1545         __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1546 }
1547
1548 static __always_inline void
1549 save_port_topology_ref(struct drm_dp_mst_port *port,
1550                        enum drm_dp_mst_topology_ref_type type)
1551 {
1552         __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1553 }
1554
1555 static inline void
1556 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1557 {
1558         mutex_lock(&mgr->topology_ref_history_lock);
1559 }
1560
1561 static inline void
1562 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1563 {
1564         mutex_unlock(&mgr->topology_ref_history_lock);
1565 }
1566 #else
1567 static inline void
1568 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1569 static inline void
1570 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1571 static inline void
1572 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1573 static inline void
1574 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1575 #define save_mstb_topology_ref(mstb, type)
1576 #define save_port_topology_ref(port, type)
1577 #endif
1578
1579 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1580 {
1581         struct drm_dp_mst_branch *mstb =
1582                 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1583         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1584
1585         drm_dp_mst_dump_mstb_topology_history(mstb);
1586
1587         INIT_LIST_HEAD(&mstb->destroy_next);
1588
1589         /*
1590          * This can get called under mgr->mutex, so we need to perform the
1591          * actual destruction of the mstb in another worker
1592          */
1593         mutex_lock(&mgr->delayed_destroy_lock);
1594         list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1595         mutex_unlock(&mgr->delayed_destroy_lock);
1596         schedule_work(&mgr->delayed_destroy_work);
1597 }
1598
1599 /**
1600  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1601  * branch device unless it's zero
1602  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1603  *
1604  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1605  * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
1606  * reached 0). Holding a topology reference implies that a malloc reference
1607  * will be held to @mstb as long as the user holds the topology reference.
1608  *
1609  * Care should be taken to ensure that the user has at least one malloc
1610  * reference to @mstb. If you already have a topology reference to @mstb, you
1611  * should use drm_dp_mst_topology_get_mstb() instead.
1612  *
1613  * See also:
1614  * drm_dp_mst_topology_get_mstb()
1615  * drm_dp_mst_topology_put_mstb()
1616  *
1617  * Returns:
1618  * * 1: A topology reference was grabbed successfully
1619  * * 0: @mstb is no longer in the topology, no reference was grabbed
1620  */
1621 static int __must_check
1622 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1623 {
1624         int ret;
1625
1626         topology_ref_history_lock(mstb->mgr);
1627         ret = kref_get_unless_zero(&mstb->topology_kref);
1628         if (ret) {
1629                 DRM_DEBUG("mstb %p (%d)\n",
1630                           mstb, kref_read(&mstb->topology_kref));
1631                 save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1632         }
1633
1634         topology_ref_history_unlock(mstb->mgr);
1635
1636         return ret;
1637 }
1638
1639 /**
1640  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1641  * branch device
1642  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1643  *
1644  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1645  * not it's already reached 0. This is only valid to use in scenarios where
1646  * you are already guaranteed to have at least one active topology reference
1647  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1648  *
1649  * See also:
1650  * drm_dp_mst_topology_try_get_mstb()
1651  * drm_dp_mst_topology_put_mstb()
1652  */
1653 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1654 {
1655         topology_ref_history_lock(mstb->mgr);
1656
1657         save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1658         WARN_ON(kref_read(&mstb->topology_kref) == 0);
1659         kref_get(&mstb->topology_kref);
1660         DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1661
1662         topology_ref_history_unlock(mstb->mgr);
1663 }
1664
1665 /**
1666  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1667  * device
1668  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1669  *
1670  * Releases a topology reference from @mstb by decrementing
1671  * &drm_dp_mst_branch.topology_kref.
1672  *
1673  * See also:
1674  * drm_dp_mst_topology_try_get_mstb()
1675  * drm_dp_mst_topology_get_mstb()
1676  */
1677 static void
1678 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1679 {
1680         topology_ref_history_lock(mstb->mgr);
1681
1682         DRM_DEBUG("mstb %p (%d)\n",
1683                   mstb, kref_read(&mstb->topology_kref) - 1);
1684         save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1685
1686         topology_ref_history_unlock(mstb->mgr);
1687         kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1688 }
1689
1690 static void drm_dp_destroy_port(struct kref *kref)
1691 {
1692         struct drm_dp_mst_port *port =
1693                 container_of(kref, struct drm_dp_mst_port, topology_kref);
1694         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1695
1696         drm_dp_mst_dump_port_topology_history(port);
1697
1698         /* There's nothing that needs locking to destroy an input port yet */
1699         if (port->input) {
1700                 drm_dp_mst_put_port_malloc(port);
1701                 return;
1702         }
1703
1704         kfree(port->cached_edid);
1705
1706         /*
1707          * we can't destroy the connector here, as we might be holding the
1708          * mode_config.mutex from an EDID retrieval
1709          */
1710         mutex_lock(&mgr->delayed_destroy_lock);
1711         list_add(&port->next, &mgr->destroy_port_list);
1712         mutex_unlock(&mgr->delayed_destroy_lock);
1713         schedule_work(&mgr->delayed_destroy_work);
1714 }
1715
1716 /**
1717  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1718  * port unless it's zero
1719  * @port: &struct drm_dp_mst_port to increment the topology refcount of
1720  *
1721  * Attempts to grab a topology reference to @port, if it hasn't yet been
1722  * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
1723  * 0). Holding a topology reference implies that a malloc reference will be
1724  * held to @port as long as the user holds the topology reference.
1725  *
1726  * Care should be taken to ensure that the user has at least one malloc
1727  * reference to @port. If you already have a topology reference to @port, you
1728  * should use drm_dp_mst_topology_get_port() instead.
1729  *
1730  * See also:
1731  * drm_dp_mst_topology_get_port()
1732  * drm_dp_mst_topology_put_port()
1733  *
1734  * Returns:
1735  * * 1: A topology reference was grabbed successfully
1736  * * 0: @port is no longer in the topology, no reference was grabbed
1737  */
1738 static int __must_check
1739 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1740 {
1741         int ret;
1742
1743         topology_ref_history_lock(port->mgr);
1744         ret = kref_get_unless_zero(&port->topology_kref);
1745         if (ret) {
1746                 DRM_DEBUG("port %p (%d)\n",
1747                           port, kref_read(&port->topology_kref));
1748                 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1749         }
1750
1751         topology_ref_history_unlock(port->mgr);
1752         return ret;
1753 }
1754
1755 /**
1756  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1757  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1758  *
1759  * Increments &drm_dp_mst_port.topology_kref without checking whether or
1760  * not it's already reached 0. This is only valid to use in scenarios where
1761  * you are already guaranteed to have at least one active topology reference
1762  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1763  *
1764  * See also:
1765  * drm_dp_mst_topology_try_get_port()
1766  * drm_dp_mst_topology_put_port()
1767  */
1768 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1769 {
1770         topology_ref_history_lock(port->mgr);
1771
1772         WARN_ON(kref_read(&port->topology_kref) == 0);
1773         kref_get(&port->topology_kref);
1774         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1775         save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1776
1777         topology_ref_history_unlock(port->mgr);
1778 }
1779
1780 /**
1781  * drm_dp_mst_topology_put_port() - release a topology reference to a port
1782  * @port: The &struct drm_dp_mst_port to release the topology reference from
1783  *
1784  * Releases a topology reference from @port by decrementing
1785  * &drm_dp_mst_port.topology_kref.
1786  *
1787  * See also:
1788  * drm_dp_mst_topology_try_get_port()
1789  * drm_dp_mst_topology_get_port()
1790  */
1791 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1792 {
1793         topology_ref_history_lock(port->mgr);
1794
1795         DRM_DEBUG("port %p (%d)\n",
1796                   port, kref_read(&port->topology_kref) - 1);
1797         save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1798
1799         topology_ref_history_unlock(port->mgr);
1800         kref_put(&port->topology_kref, drm_dp_destroy_port);
1801 }
1802
1803 static struct drm_dp_mst_branch *
1804 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1805                                               struct drm_dp_mst_branch *to_find)
1806 {
1807         struct drm_dp_mst_port *port;
1808         struct drm_dp_mst_branch *rmstb;
1809
1810         if (to_find == mstb)
1811                 return mstb;
1812
1813         list_for_each_entry(port, &mstb->ports, next) {
1814                 if (port->mstb) {
1815                         rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1816                             port->mstb, to_find);
1817                         if (rmstb)
1818                                 return rmstb;
1819                 }
1820         }
1821         return NULL;
1822 }
1823
1824 static struct drm_dp_mst_branch *
1825 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1826                                        struct drm_dp_mst_branch *mstb)
1827 {
1828         struct drm_dp_mst_branch *rmstb = NULL;
1829
1830         mutex_lock(&mgr->lock);
1831         if (mgr->mst_primary) {
1832                 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1833                     mgr->mst_primary, mstb);
1834
1835                 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1836                         rmstb = NULL;
1837         }
1838         mutex_unlock(&mgr->lock);
1839         return rmstb;
1840 }
1841
1842 static struct drm_dp_mst_port *
1843 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1844                                               struct drm_dp_mst_port *to_find)
1845 {
1846         struct drm_dp_mst_port *port, *mport;
1847
1848         list_for_each_entry(port, &mstb->ports, next) {
1849                 if (port == to_find)
1850                         return port;
1851
1852                 if (port->mstb) {
1853                         mport = drm_dp_mst_topology_get_port_validated_locked(
1854                             port->mstb, to_find);
1855                         if (mport)
1856                                 return mport;
1857                 }
1858         }
1859         return NULL;
1860 }
1861
1862 static struct drm_dp_mst_port *
1863 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1864                                        struct drm_dp_mst_port *port)
1865 {
1866         struct drm_dp_mst_port *rport = NULL;
1867
1868         mutex_lock(&mgr->lock);
1869         if (mgr->mst_primary) {
1870                 rport = drm_dp_mst_topology_get_port_validated_locked(
1871                     mgr->mst_primary, port);
1872
1873                 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1874                         rport = NULL;
1875         }
1876         mutex_unlock(&mgr->lock);
1877         return rport;
1878 }
1879
1880 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1881 {
1882         struct drm_dp_mst_port *port;
1883         int ret;
1884
1885         list_for_each_entry(port, &mstb->ports, next) {
1886                 if (port->port_num == port_num) {
1887                         ret = drm_dp_mst_topology_try_get_port(port);
1888                         return ret ? port : NULL;
1889                 }
1890         }
1891
1892         return NULL;
1893 }
1894
1895 /*
1896  * Calculate a new RAD for this MST branch device. If the parent has an
1897  * LCT of 2 then it has 1 nibble of RAD; if the parent has an LCT of 3
1898  * then it has 2 nibbles of RAD, and so on.
1899  */
1900 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1901                                  u8 *rad)
1902 {
1903         int parent_lct = port->parent->lct;
1904         int shift = 4;
1905         int idx = (parent_lct - 1) / 2;
1906         if (parent_lct > 1) {
1907                 memcpy(rad, port->parent->rad, idx + 1);
1908                 shift = (parent_lct % 2) ? 4 : 0;
1909         } else
1910                 rad[0] = 0;
1911
1912         rad[idx] |= port->port_num << shift;
1913         return parent_lct + 1;
1914 }
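
/*
 * A worked example of the packing above, with made-up values: a port with
 * port_num 2 on a branch with lct 2 and rad[0] == 0x10 (i.e. a branch
 * reached through port 1 of the primary) yields lct 3 and rad[0] == 0x12
 * for the new branch device.
 */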
1915
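/*
 * Returns 1 if the caller should send a LINK_ADDRESS to the port's new child
 * MSTB, 0 if nothing further needs to be done, or a negative error code on
 * failure (in which case the port's PDT is reset to DP_PEER_DEVICE_NONE).
 */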
1916 static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt)
1917 {
1918         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1919         struct drm_dp_mst_branch *mstb;
1920         u8 rad[8], lct;
1921         int ret = 0;
1922
1923         if (port->pdt == new_pdt)
1924                 return 0;
1925
1926         /* Teardown the old pdt, if there is one */
1927         switch (port->pdt) {
1928         case DP_PEER_DEVICE_DP_LEGACY_CONV:
1929         case DP_PEER_DEVICE_SST_SINK:
1930                 /*
1931                  * If the new PDT would also have an i2c bus, don't bother
1932                  * with reregistering it
1933                  */
1934                 if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1935                     new_pdt == DP_PEER_DEVICE_SST_SINK) {
1936                         port->pdt = new_pdt;
1937                         return 0;
1938                 }
1939
1940                 /* remove i2c over sideband */
1941                 drm_dp_mst_unregister_i2c_bus(&port->aux);
1942                 break;
1943         case DP_PEER_DEVICE_MST_BRANCHING:
1944                 mutex_lock(&mgr->lock);
1945                 drm_dp_mst_topology_put_mstb(port->mstb);
1946                 port->mstb = NULL;
1947                 mutex_unlock(&mgr->lock);
1948                 break;
1949         }
1950
1951         port->pdt = new_pdt;
1952         switch (port->pdt) {
1953         case DP_PEER_DEVICE_DP_LEGACY_CONV:
1954         case DP_PEER_DEVICE_SST_SINK:
1955                 /* add i2c over sideband */
1956                 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1957                 break;
1958
1959         case DP_PEER_DEVICE_MST_BRANCHING:
1960                 lct = drm_dp_calculate_rad(port, rad);
1961                 mstb = drm_dp_add_mst_branch_device(lct, rad);
1962                 if (!mstb) {
1963                         ret = -ENOMEM;
1964                         DRM_ERROR("Failed to create MSTB for port %p\n", port);
1965                         goto out;
1966                 }
1967
1968                 mutex_lock(&mgr->lock);
1969                 port->mstb = mstb;
1970                 mstb->mgr = port->mgr;
1971                 mstb->port_parent = port;
1972
1973                 /*
1974                  * Make sure this port's memory allocation stays
1975                  * around until its child MSTB releases it
1976                  */
1977                 drm_dp_mst_get_port_malloc(port);
1978                 mutex_unlock(&mgr->lock);
1979
1980                 /* And make sure we send a link address for this */
1981                 ret = 1;
1982                 break;
1983         }
1984
1985 out:
1986         if (ret < 0)
1987                 port->pdt = DP_PEER_DEVICE_NONE;
1988         return ret;
1989 }
1990
1991 /**
1992  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
1993  * @aux: Fake sideband AUX CH
1994  * @offset: address of the (first) register to read
1995  * @buffer: buffer to store the register values
1996  * @size: number of bytes in @buffer
1997  *
1998  * Performs the same functionality for remote devices via
1999  * sideband messaging as drm_dp_dpcd_read() does for local
2000  * devices via actual AUX CH.
2001  *
2002  * Return: Number of bytes read, or negative error code on failure.
2003  */
2004 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2005                              unsigned int offset, void *buffer, size_t size)
2006 {
2007         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2008                                                     aux);
2009
2010         return drm_dp_send_dpcd_read(port->mgr, port,
2011                                      offset, size, buffer);
2012 }
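
/*
 * A minimal usage sketch (error handling elided), reading the remote sink's
 * DPCD revision through the port's fake AUX channel:
 *
 *	u8 rev;
 *
 *	if (drm_dp_mst_dpcd_read(&port->aux, DP_DPCD_REV, &rev, 1) == 1)
 *		... rev now holds the remote DPCD revision, e.g. 0x12 ...
 */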
2013
2014 /**
2015  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2016  * @aux: Fake sideband AUX CH
2017  * @offset: address of the (first) register to write
2018  * @buffer: buffer containing the values to write
2019  * @size: number of bytes in @buffer
2020  *
2021  * Performs the same functionality for remote devices via
2022  * sideband messaging as drm_dp_dpcd_write() does for local
2023  * devices via actual AUX CH.
2024  *
2025  * Return: 0 on success, negative error code on failure.
2026  */
2027 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2028                               unsigned int offset, void *buffer, size_t size)
2029 {
2030         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2031                                                     aux);
2032
2033         return drm_dp_send_dpcd_write(port->mgr, port,
2034                                       offset, size, buffer);
2035 }
2036
2037 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2038 {
2039         int ret;
2040
2041         memcpy(mstb->guid, guid, 16);
2042
2043         if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2044                 if (mstb->port_parent) {
2045                         ret = drm_dp_send_dpcd_write(
2046                                         mstb->mgr,
2047                                         mstb->port_parent,
2048                                         DP_GUID,
2049                                         16,
2050                                         mstb->guid);
2051                 } else {
2053                         ret = drm_dp_dpcd_write(
2054                                         mstb->mgr->aux,
2055                                         DP_GUID,
2056                                         mstb->guid,
2057                                         16);
2058                 }
2059         }
2060 }
2061
2062 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2063                                 int pnum,
2064                                 char *proppath,
2065                                 size_t proppath_size)
2066 {
2067         int i;
2068         char temp[8];
2069         snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2070         for (i = 0; i < (mstb->lct - 1); i++) {
2071                 int shift = (i % 2) ? 0 : 4;
2072                 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2073                 snprintf(temp, sizeof(temp), "-%d", port_num);
2074                 strlcat(proppath, temp, proppath_size);
2075         }
2076         snprintf(temp, sizeof(temp), "-%d", pnum);
2077         strlcat(proppath, temp, proppath_size);
2078 }
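
/*
 * For example, with made-up values: a connector base id of 30, an mstb with
 * lct 3 and rad[0] == 0x12, and pnum 8 produce the property path
 * "mst:30-1-2-8".
 */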
2079
2080 /**
2081  * drm_dp_mst_connector_late_register() - Late MST connector registration
2082  * @connector: The MST connector
2083  * @port: The MST port for this connector
2084  *
2085  * Helper to register the remote aux device for this MST port. Drivers should
2086  * call this from their mst connector's late_register hook to enable MST aux
2087  * devices.
2088  *
2089  * Return: 0 on success, negative error code on failure.
2090  */
2091 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2092                                        struct drm_dp_mst_port *port)
2093 {
2094         DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2095                       port->aux.name, connector->kdev->kobj.name);
2096
2097         port->aux.dev = connector->kdev;
2098         return drm_dp_aux_register_devnode(&port->aux);
2099 }
2100 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2101
2102 /**
2103  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2104  * @connector: The MST connector
2105  * @port: The MST port for this connector
2106  *
2107  * Helper to unregister the remote aux device for this MST port, registered by
2108  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2109  * connector's early_unregister hook.
2110  */
2111 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2112                                            struct drm_dp_mst_port *port)
2113 {
2114         DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2115                       port->aux.name, connector->kdev->kobj.name);
2116         drm_dp_aux_unregister_devnode(&port->aux);
2117 }
2118 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
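
/*
 * A hypothetical sketch of wiring these two helpers into a driver's MST
 * connector functions (the foo_* names are made up):
 *
 *	static int foo_mst_connector_late_register(struct drm_connector *connector)
 *	{
 *		struct foo_mst_connector *conn = to_foo_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, conn->port);
 *	}
 *
 *	static void foo_mst_connector_early_unregister(struct drm_connector *connector)
 *	{
 *		struct foo_mst_connector *conn = to_foo_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, conn->port);
 *	}
 */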
2119
2120 static void
2121 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2122                               struct drm_dp_mst_port *port)
2123 {
2124         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2125         char proppath[255];
2126         int ret;
2127
2128         build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2129         port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2130         if (!port->connector) {
2131                 ret = -ENOMEM;
2132                 goto error;
2133         }
2134
2135         if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
2136              port->pdt == DP_PEER_DEVICE_SST_SINK) &&
2137             port->port_num >= DP_MST_LOGICAL_PORT_0) {
2138                 port->cached_edid = drm_get_edid(port->connector,
2139                                                  &port->aux.ddc);
2140                 drm_connector_set_tile_property(port->connector);
2141         }
2142
2143         mgr->cbs->register_connector(port->connector);
2144         return;
2145
2146 error:
2147         DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2148 }
2149
2150 /*
2151  * Drop a topology reference, and unlink the port from the in-memory topology
2152  * layout
2153  */
2154 static void
2155 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2156                                 struct drm_dp_mst_port *port)
2157 {
2158         mutex_lock(&mgr->lock);
2159         list_del(&port->next);
2160         mutex_unlock(&mgr->lock);
2161         drm_dp_mst_topology_put_port(port);
2162 }
2163
2164 static struct drm_dp_mst_port *
2165 drm_dp_mst_add_port(struct drm_device *dev,
2166                     struct drm_dp_mst_topology_mgr *mgr,
2167                     struct drm_dp_mst_branch *mstb, u8 port_number)
2168 {
2169         struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2170
2171         if (!port)
2172                 return NULL;
2173
2174         kref_init(&port->topology_kref);
2175         kref_init(&port->malloc_kref);
2176         port->parent = mstb;
2177         port->port_num = port_number;
2178         port->mgr = mgr;
2179         port->aux.name = "DPMST";
2180         port->aux.dev = dev->dev;
2181         port->aux.is_remote = true;
2182
2183         /*
2184          * Make sure the memory allocation for our parent branch stays
2185          * around until our own memory allocation is released
2186          */
2187         drm_dp_mst_get_mstb_malloc(mstb);
2188
2189         return port;
2190 }
2191
2192 static int
2193 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2194                                     struct drm_device *dev,
2195                                     struct drm_dp_link_addr_reply_port *port_msg)
2196 {
2197         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2198         struct drm_dp_mst_port *port;
2199         int old_ddps = 0, ret;
2200         u8 new_pdt = DP_PEER_DEVICE_NONE;
2201         bool created = false, send_link_addr = false, changed = false;
2202
2203         port = drm_dp_get_port(mstb, port_msg->port_number);
2204         if (!port) {
2205                 port = drm_dp_mst_add_port(dev, mgr, mstb,
2206                                            port_msg->port_number);
2207                 if (!port)
2208                         return -ENOMEM;
2209                 created = true;
2210                 changed = true;
2211         } else if (!port->input && port_msg->input_port && port->connector) {
2212                 /* Since port->connector can't be changed here, we create a
2213                  * new port if input_port changes from 0 to 1
2214                  */
2215                 drm_dp_mst_topology_unlink_port(mgr, port);
2216                 drm_dp_mst_topology_put_port(port);
2217                 port = drm_dp_mst_add_port(dev, mgr, mstb,
2218                                            port_msg->port_number);
2219                 if (!port)
2220                         return -ENOMEM;
2221                 changed = true;
2222                 created = true;
2223         } else if (port->input && !port_msg->input_port) {
2224                 changed = true;
2225         } else if (port->connector) {
2226                 /* We're updating a port that's exposed to userspace, so do it
2227                  * under lock
2228                  */
2229                 drm_modeset_lock(&mgr->base.lock, NULL);
2230
2231                 old_ddps = port->ddps;
2232                 changed = port->ddps != port_msg->ddps ||
2233                         (port->ddps &&
2234                          (port->ldps != port_msg->legacy_device_plug_status ||
2235                           port->dpcd_rev != port_msg->dpcd_revision ||
2236                           port->mcs != port_msg->mcs ||
2237                           port->pdt != port_msg->peer_device_type ||
2238                           port->num_sdp_stream_sinks !=
2239                           port_msg->num_sdp_stream_sinks));
2240         }
2241
2242         port->input = port_msg->input_port;
2243         if (!port->input)
2244                 new_pdt = port_msg->peer_device_type;
2245         port->mcs = port_msg->mcs;
2246         port->ddps = port_msg->ddps;
2247         port->ldps = port_msg->legacy_device_plug_status;
2248         port->dpcd_rev = port_msg->dpcd_revision;
2249         port->num_sdp_streams = port_msg->num_sdp_streams;
2250         port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2251
2252         /* manage mstb port lists with mgr lock - take a reference
2253          * for this list */
2254         if (created) {
2255                 mutex_lock(&mgr->lock);
2256                 drm_dp_mst_topology_get_port(port);
2257                 list_add(&port->next, &mstb->ports);
2258                 mutex_unlock(&mgr->lock);
2259         }
2260
2261         if (old_ddps != port->ddps) {
2262                 if (port->ddps) {
2263                         if (!port->input) {
2264                                 drm_dp_send_enum_path_resources(mgr, mstb,
2265                                                                 port);
2266                         }
2267                 } else {
2268                         port->available_pbn = 0;
2269                 }
2270         }
2271
2272         ret = drm_dp_port_set_pdt(port, new_pdt);
2273         if (ret == 1) {
2274                 send_link_addr = true;
2275         } else if (ret < 0) {
2276                 DRM_ERROR("Failed to change PDT on port %p: %d\n",
2277                           port, ret);
2278                 goto fail;
2279         }
2280
2281         /*
2282          * If this port wasn't just created, then we're reprobing because
2283          * we're coming out of suspend. In this case, always resend the link
2284          * address if there's an MSTB on this port
2285          */
2286         if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING)
2287                 send_link_addr = true;
2288
2289         if (port->connector)
2290                 drm_modeset_unlock(&mgr->base.lock);
2291         else if (!port->input)
2292                 drm_dp_mst_port_add_connector(mstb, port);
2293
2294         if (send_link_addr && port->mstb) {
2295                 ret = drm_dp_send_link_address(mgr, port->mstb);
2296                 if (ret == 1) /* MSTB below us changed */
2297                         changed = true;
2298                 else if (ret < 0)
2299                         goto fail_put;
2300         }
2301
2302         /* put reference to this port */
2303         drm_dp_mst_topology_put_port(port);
2304         return changed;
2305
2306 fail:
2307         drm_dp_mst_topology_unlink_port(mgr, port);
2308         if (port->connector)
2309                 drm_modeset_unlock(&mgr->base.lock);
2310 fail_put:
2311         drm_dp_mst_topology_put_port(port);
2312         return ret;
2313 }
2314
2315 static void
2316 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2317                             struct drm_dp_connection_status_notify *conn_stat)
2318 {
2319         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2320         struct drm_dp_mst_port *port;
2321         int old_ddps, ret;
2322         u8 new_pdt;
2323         bool dowork = false, create_connector = false;
2324
2325         port = drm_dp_get_port(mstb, conn_stat->port_number);
2326         if (!port)
2327                 return;
2328
2329         if (port->connector) {
2330                 if (!port->input && conn_stat->input_port) {
2331                         /*
2332                          * We can't remove a connector from an already exposed
2333                          * port, so just throw the port out and make sure we
2334                          * reprobe the link address of its parent MSTB
2335                          */
2336                         drm_dp_mst_topology_unlink_port(mgr, port);
2337                         mstb->link_address_sent = false;
2338                         dowork = true;
2339                         goto out;
2340                 }
2341
2342                 /* Locking is only needed if the port's exposed to userspace */
2343                 drm_modeset_lock(&mgr->base.lock, NULL);
2344         } else if (port->input && !conn_stat->input_port) {
2345                 create_connector = true;
2346                 /* Reprobe link address so we get num_sdp_streams */
2347                 mstb->link_address_sent = false;
2348                 dowork = true;
2349         }
2350
2351         old_ddps = port->ddps;
2352         port->input = conn_stat->input_port;
2353         port->mcs = conn_stat->message_capability_status;
2354         port->ldps = conn_stat->legacy_device_plug_status;
2355         port->ddps = conn_stat->displayport_device_plug_status;
2356
2357         if (old_ddps != port->ddps) {
2358                 if (port->ddps) {
2359                         dowork = true;
2360                 } else {
2361                         port->available_pbn = 0;
2362                 }
2363         }
2364
2365         new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2366
2367         ret = drm_dp_port_set_pdt(port, new_pdt);
2368         if (ret == 1) {
2369                 dowork = true;
2370         } else if (ret < 0) {
2371                 DRM_ERROR("Failed to change PDT for port %p: %d\n",
2372                           port, ret);
2373                 dowork = false;
2374         }
2375
2376         if (port->connector)
2377                 drm_modeset_unlock(&mgr->base.lock);
2378         else if (create_connector)
2379                 drm_dp_mst_port_add_connector(mstb, port);
2380
2381 out:
2382         drm_dp_mst_topology_put_port(port);
2383         if (dowork)
2384                 queue_work(system_long_wq, &mstb->mgr->work);
2385 }
2386
2387 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2388                                                                u8 lct, u8 *rad)
2389 {
2390         struct drm_dp_mst_branch *mstb;
2391         struct drm_dp_mst_port *port;
2392         int i, ret;
2393         /* find the branch device by iterating down */
2394
2395         mutex_lock(&mgr->lock);
2396         mstb = mgr->mst_primary;
2397
2398         if (!mstb)
2399                 goto out;
2400
2401         for (i = 0; i < lct - 1; i++) {
2402                 int shift = (i % 2) ? 0 : 4;
2403                 int port_num = (rad[i / 2] >> shift) & 0xf;
2404
2405                 list_for_each_entry(port, &mstb->ports, next) {
2406                         if (port->port_num == port_num) {
2407                                 mstb = port->mstb;
2408                                 if (!mstb) {
2409                                         DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2410                                         goto out;
2411                                 }
2412
2413                                 break;
2414                         }
2415                 }
2416         }
2417         ret = drm_dp_mst_topology_try_get_mstb(mstb);
2418         if (!ret)
2419                 mstb = NULL;
2420 out:
2421         mutex_unlock(&mgr->lock);
2422         return mstb;
2423 }
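
/*
 * A worked example of the RAD walk above, with made-up values: lct 3 with
 * rad[0] == 0x12 descends through port 1 of the primary branch, then port 2
 * of that child, mirroring how drm_dp_calculate_rad() packs the address.
 */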
2424
2425 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2426         struct drm_dp_mst_branch *mstb,
2427         const uint8_t *guid)
2428 {
2429         struct drm_dp_mst_branch *found_mstb;
2430         struct drm_dp_mst_port *port;
2431
2432         if (memcmp(mstb->guid, guid, 16) == 0)
2433                 return mstb;
2434
2436         list_for_each_entry(port, &mstb->ports, next) {
2437                 if (!port->mstb)
2438                         continue;
2439
2440                 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2441
2442                 if (found_mstb)
2443                         return found_mstb;
2444         }
2445
2446         return NULL;
2447 }
2448
2449 static struct drm_dp_mst_branch *
2450 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2451                                      const uint8_t *guid)
2452 {
2453         struct drm_dp_mst_branch *mstb;
2454         int ret;
2455
2456         /* find the branch device by iterating down */
2457         mutex_lock(&mgr->lock);
2458
2459         mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2460         if (mstb) {
2461                 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2462                 if (!ret)
2463                         mstb = NULL;
2464         }
2465
2466         mutex_unlock(&mgr->lock);
2467         return mstb;
2468 }
2469
2470 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2471                                                struct drm_dp_mst_branch *mstb)
2472 {
2473         struct drm_dp_mst_port *port;
2474         int ret;
2475         bool changed = false;
2476
2477         if (!mstb->link_address_sent) {
2478                 ret = drm_dp_send_link_address(mgr, mstb);
2479                 if (ret == 1)
2480                         changed = true;
2481                 else if (ret < 0)
2482                         return ret;
2483         }
2484
2485         list_for_each_entry(port, &mstb->ports, next) {
2486                 struct drm_dp_mst_branch *mstb_child = NULL;
2487
2488                 if (port->input || !port->ddps)
2489                         continue;
2490
2491                 if (!port->available_pbn) {
2492                         drm_modeset_lock(&mgr->base.lock, NULL);
2493                         drm_dp_send_enum_path_resources(mgr, mstb, port);
2494                         drm_modeset_unlock(&mgr->base.lock);
2495                         changed = true;
2496                 }
2497
2498                 if (port->mstb)
2499                         mstb_child = drm_dp_mst_topology_get_mstb_validated(
2500                             mgr, port->mstb);
2501
2502                 if (mstb_child) {
2503                         ret = drm_dp_check_and_send_link_address(mgr,
2504                                                                  mstb_child);
2505                         drm_dp_mst_topology_put_mstb(mstb_child);
2506                         if (ret == 1)
2507                                 changed = true;
2508                         else if (ret < 0)
2509                                 return ret;
2510                 }
2511         }
2512
2513         return changed;
2514 }
2515
2516 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2517 {
2518         struct drm_dp_mst_topology_mgr *mgr =
2519                 container_of(work, struct drm_dp_mst_topology_mgr, work);
2520         struct drm_device *dev = mgr->dev;
2521         struct drm_dp_mst_branch *mstb;
2522         int ret;
2523
2524         mutex_lock(&mgr->probe_lock);
2525
2526         mutex_lock(&mgr->lock);
2527         mstb = mgr->mst_primary;
2528         if (mstb) {
2529                 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2530                 if (!ret)
2531                         mstb = NULL;
2532         }
2533         mutex_unlock(&mgr->lock);
2534         if (!mstb) {
2535                 mutex_unlock(&mgr->probe_lock);
2536                 return;
2537         }
2538
2539         ret = drm_dp_check_and_send_link_address(mgr, mstb);
2540         drm_dp_mst_topology_put_mstb(mstb);
2541
2542         mutex_unlock(&mgr->probe_lock);
2543         if (ret)
2544                 drm_kms_helper_hotplug_event(dev);
2545 }
2546
2547 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2548                                  u8 *guid)
2549 {
2550         u64 salt;
2551
2552         if (memchr_inv(guid, 0, 16))
2553                 return true;
2554
2555         salt = get_jiffies_64();
2556
2557         memcpy(&guid[0], &salt, sizeof(u64));
2558         memcpy(&guid[8], &salt, sizeof(u64));
2559
2560         return false;
2561 }
2562
2563 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2564 {
2565         struct drm_dp_sideband_msg_req_body req;
2566
2567         req.req_type = DP_REMOTE_DPCD_READ;
2568         req.u.dpcd_read.port_number = port_num;
2569         req.u.dpcd_read.dpcd_address = offset;
2570         req.u.dpcd_read.num_bytes = num_bytes;
2571         drm_dp_encode_sideband_req(&req, msg);
2572
2573         return 0;
2574 }
2575
2576 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2577                                     bool up, u8 *msg, int len)
2578 {
2579         int ret;
2580         int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2581         int tosend, total, offset;
2582         int retries = 0;
2583
2584 retry:
2585         total = len;
2586         offset = 0;
2587         do {
2588                 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2589
2590                 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2591                                         &msg[offset],
2592                                         tosend);
2593                 if (ret != tosend) {
2594                         if (ret == -EIO && retries < 5) {
2595                                 retries++;
2596                                 goto retry;
2597                         }
2598                         DRM_DEBUG_KMS("failed to write sideband msg chunk (%d bytes): %d\n", tosend, ret);
2599
2600                         return -EIO;
2601                 }
2602                 offset += tosend;
2603                 total -= tosend;
2604         } while (total > 0);
2605         return 0;
2606 }
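
/*
 * For instance, with made-up sizes: a 20-byte message and
 * max_dpcd_transaction_bytes >= 16 result in a 16-byte write followed by a
 * 4-byte write; on -EIO the whole message is retried from offset 0, up to
 * five times.
 */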
2607
2608 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2609                                   struct drm_dp_sideband_msg_tx *txmsg)
2610 {
2611         struct drm_dp_mst_branch *mstb = txmsg->dst;
2612         u8 req_type;
2613
2614         /* both msg slots are full */
2615         if (txmsg->seqno == -1) {
2616                 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2617                         DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2618                         return -EAGAIN;
2619                 }
2620                 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2621                         txmsg->seqno = mstb->last_seqno;
2622                         mstb->last_seqno ^= 1;
2623                 } else if (mstb->tx_slots[0] == NULL)
2624                         txmsg->seqno = 0;
2625                 else
2626                         txmsg->seqno = 1;
2627                 mstb->tx_slots[txmsg->seqno] = txmsg;
2628         }
2629
2630         req_type = txmsg->msg[0] & 0x7f;
2631         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2632                 req_type == DP_RESOURCE_STATUS_NOTIFY)
2633                 hdr->broadcast = 1;
2634         else
2635                 hdr->broadcast = 0;
2636         hdr->path_msg = txmsg->path_msg;
2637         hdr->lct = mstb->lct;
2638         hdr->lcr = mstb->lct - 1;
2639         if (mstb->lct > 1)
2640                 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2641         hdr->seqno = txmsg->seqno;
2642         return 0;
2643 }
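
/*
 * e.g., with made-up values: a destination MSTB with lct 3 and
 * rad[0] == 0x12 yields a header with lct 3, lcr 2 and a single RAD byte of
 * 0x12, i.e. a relative address of port 1 then port 2.
 */
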
2644 /*
2645  * process a single block of the next message in the sideband queue
2646  */
2647 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2648                                    struct drm_dp_sideband_msg_tx *txmsg,
2649                                    bool up)
2650 {
2651         u8 chunk[48];
2652         struct drm_dp_sideband_msg_hdr hdr;
2653         int len, space, idx, tosend;
2654         int ret;
2655
2656         memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2657
2658         if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2659                 txmsg->seqno = -1;
2660                 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2661         }
2662
2663         /* make hdr from dst mst - for replies use seqno,
2664          * otherwise assign one */
2665         ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2666         if (ret < 0)
2667                 return ret;
2668
2669         /* amount left to send in this message */
2670         len = txmsg->cur_len - txmsg->cur_offset;
2671
2672         /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2673         space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2674
2675         tosend = min(len, space);
2676         if (len == txmsg->cur_len)
2677                 hdr.somt = 1;
2678         if (space >= len)
2679                 hdr.eomt = 1;
2680
2681
2683         drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2684         memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2685         /* add crc at end */
2686         drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2687         idx += tosend + 1;
2688
2689         ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2690         if (unlikely(ret)) {
2691                 if (drm_debug_enabled(DRM_UT_DP)) {
2692                         struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2693
2694                         drm_printf(&p, "sideband msg failed to send\n");
2695                         drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2696                 }
2697                 return ret;
2698         }
2697
2698         txmsg->cur_offset += tosend;
2699         if (txmsg->cur_offset == txmsg->cur_len) {
2700                 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2701                 return 1;
2702         }
2703         return 0;
2704 }
2705
2706 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2707 {
2708         struct drm_dp_sideband_msg_tx *txmsg;
2709         int ret;
2710
2711         WARN_ON(!mutex_is_locked(&mgr->qlock));
2712
2713         /* construct a chunk from the first msg in the tx_msg queue */
2714         if (list_empty(&mgr->tx_msg_downq))
2715                 return;
2716
2717         txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2718         ret = process_single_tx_qlock(mgr, txmsg, false);
2719         if (ret == 1) {
2720                 /* txmsg has been sent; it should be in the slots now */
2721                 list_del(&txmsg->next);
2722         } else if (ret) {
2723                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2724                 list_del(&txmsg->next);
2725                 if (txmsg->seqno != -1)
2726                         txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2727                 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2728                 wake_up_all(&mgr->tx_waitq);
2729         }
2730 }
2731
2732 /* called holding qlock */
2733 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2734                                        struct drm_dp_sideband_msg_tx *txmsg)
2735 {
2736         int ret;
2737
2738         /* construct a chunk from the first msg in the tx_msg queue */
2739         ret = process_single_tx_qlock(mgr, txmsg, true);
2740
2741         if (ret != 1)
2742                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2743
2744         if (txmsg->seqno != -1) {
2745                 WARN_ON((unsigned int)txmsg->seqno >=
2746                         ARRAY_SIZE(txmsg->dst->tx_slots));
2747                 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2748         }
2749 }
2750
2751 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2752                                  struct drm_dp_sideband_msg_tx *txmsg)
2753 {
2754         mutex_lock(&mgr->qlock);
2755         list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2756
2757         if (drm_debug_enabled(DRM_UT_DP)) {
2758                 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2759
2760                 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2761         }
2762
2763         if (list_is_singular(&mgr->tx_msg_downq))
2764                 process_single_down_tx_qlock(mgr);
2765         mutex_unlock(&mgr->qlock);
2766 }
2767
2768 static void
2769 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2770 {
2771         struct drm_dp_link_addr_reply_port *port_reply;
2772         int i;
2773
2774         for (i = 0; i < reply->nports; i++) {
2775                 port_reply = &reply->ports[i];
2776                 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2777                               i,
2778                               port_reply->input_port,
2779                               port_reply->peer_device_type,
2780                               port_reply->port_number,
2781                               port_reply->dpcd_revision,
2782                               port_reply->mcs,
2783                               port_reply->ddps,
2784                               port_reply->legacy_device_plug_status,
2785                               port_reply->num_sdp_streams,
2786                               port_reply->num_sdp_stream_sinks);
2787         }
2788 }
2789
2790 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2791                                      struct drm_dp_mst_branch *mstb)
2792 {
2793         struct drm_dp_sideband_msg_tx *txmsg;
2794         struct drm_dp_link_address_ack_reply *reply;
2795         struct drm_dp_mst_port *port, *tmp;
2796         int i, len, ret, port_mask = 0;
2797         bool changed = false;
2798
2799         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2800         if (!txmsg)
2801                 return -ENOMEM;
2802
2803         txmsg->dst = mstb;
2804         build_link_address(txmsg);
2805
2806         mstb->link_address_sent = true;
2807         drm_dp_queue_down_tx(mgr, txmsg);
2808
2809         /* FIXME: Actually do some real error handling here */
2810         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2811         if (ret <= 0) {
2812                 DRM_ERROR("Sending link address failed with %d\n", ret);
2813                 goto out;
2814         }
2815         if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2816                 DRM_ERROR("link address NAK received\n");
2817                 ret = -EIO;
2818                 goto out;
2819         }
2820
2821         reply = &txmsg->reply.u.link_addr;
2822         DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2823         drm_dp_dump_link_address(reply);
2824
2825         drm_dp_check_mstb_guid(mstb, reply->guid);
2826
2827         for (i = 0; i < reply->nports; i++) {
2828                 port_mask |= BIT(reply->ports[i].port_number);
2829                 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2830                                                           &reply->ports[i]);
2831                 if (ret == 1)
2832                         changed = true;
2833                 else if (ret < 0)
2834                         goto out;
2835         }
2836
2837         /* Prune any ports that are currently a part of mstb in our in-memory
2838          * topology, but were not seen in this link address. Usually this
2839          * means that they were removed while the topology was out of sync,
2840          * e.g. during suspend/resume
2841          */
2842         mutex_lock(&mgr->lock);
2843         list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2844                 if (port_mask & BIT(port->port_num))
2845                         continue;
2846
2847                 DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2848                               port->port_num);
2849                 list_del(&port->next);
2850                 drm_dp_mst_topology_put_port(port);
2851                 changed = true;
2852         }
2853         mutex_unlock(&mgr->lock);
2854
2855 out:
2856         if (ret <= 0)
2857                 mstb->link_address_sent = false;
2858         kfree(txmsg);
2859         return ret < 0 ? ret : changed;
2860 }
2861
2862 static int
2863 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2864                                 struct drm_dp_mst_branch *mstb,
2865                                 struct drm_dp_mst_port *port)
2866 {
2867         struct drm_dp_enum_path_resources_ack_reply *path_res;
2868         struct drm_dp_sideband_msg_tx *txmsg;
2869         int ret;
2871
2872         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2873         if (!txmsg)
2874                 return -ENOMEM;
2875
2876         txmsg->dst = mstb;
2877         build_enum_path_resources(txmsg, port->port_num);
2878
2879         drm_dp_queue_down_tx(mgr, txmsg);
2880
2881         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2882         if (ret > 0) {
2883                 path_res = &txmsg->reply.u.path_resources;
2884
2885                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2886                         DRM_DEBUG_KMS("enum path resources nak received\n");
2887                 } else {
2888                         if (port->port_num != path_res->port_number)
2889                                 DRM_ERROR("got incorrect port in response\n");
2890
2891                         DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2892                                       path_res->port_number,
2893                                       path_res->full_payload_bw_number,
2894                                       path_res->avail_payload_bw_number);
2895                         port->available_pbn =
2896                                 path_res->avail_payload_bw_number;
2897                 }
2898         }
2899
2900         kfree(txmsg);
2901         return 0;
2902 }
2903
2904 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2905 {
2906         if (!mstb->port_parent)
2907                 return NULL;
2908
2909         if (mstb->port_parent->mstb != mstb)
2910                 return mstb->port_parent;
2911
2912         return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2913 }
2914
2915 /*
2916  * Searches upwards in the topology starting from mstb to try to find the
2917  * closest available parent of mstb that's still connected to the rest of the
2918  * topology. This can be used in order to perform operations like releasing
2919  * payloads, where the branch device which owned the payload may no longer be
2920  * around and thus would require that the payload on the last living relative
2921  * be freed instead.
2922  */
2923 static struct drm_dp_mst_branch *
2924 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2925                                         struct drm_dp_mst_branch *mstb,
2926                                         int *port_num)
2927 {
2928         struct drm_dp_mst_branch *rmstb = NULL;
2929         struct drm_dp_mst_port *found_port;
2930
2931         mutex_lock(&mgr->lock);
2932         if (!mgr->mst_primary)
2933                 goto out;
2934
2935         do {
2936                 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2937                 if (!found_port)
2938                         break;
2939
2940                 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2941                         rmstb = found_port->parent;
2942                         *port_num = found_port->port_num;
2943                 } else {
2944                         /* Search again, starting from this parent */
2945                         mstb = found_port->parent;
2946                 }
2947         } while (!rmstb);
2948 out:
2949         mutex_unlock(&mgr->lock);
2950         return rmstb;
2951 }
2952
2953 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2954                                    struct drm_dp_mst_port *port,
2955                                    int id,
2956                                    int pbn)
2957 {
2958         struct drm_dp_sideband_msg_tx *txmsg;
2959         struct drm_dp_mst_branch *mstb;
2960         int ret, port_num;
2961         u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2962         int i;
2963
2964         port_num = port->port_num;
2965         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2966         if (!mstb) {
2967                 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2968                                                                port->parent,
2969                                                                &port_num);
2970
2971                 if (!mstb)
2972                         return -EINVAL;
2973         }
2974
2975         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2976         if (!txmsg) {
2977                 ret = -ENOMEM;
2978                 goto fail_put;
2979         }
2980
2981         for (i = 0; i < port->num_sdp_streams; i++)
2982                 sinks[i] = i;
2983
2984         txmsg->dst = mstb;
2985         build_allocate_payload(txmsg, port_num, id, pbn,
2986                                port->num_sdp_streams, sinks);
2988
2989         drm_dp_queue_down_tx(mgr, txmsg);
2990
2991         /*
2992          * FIXME: there is a small chance that between getting the last
2993          * connected mstb and sending the payload message, the last connected
2994          * mstb could also be removed from the topology. In the future, this
2995          * needs to be fixed by restarting the
2996          * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2997          * timeout if the topology is still connected to the system.
2998          */
2999         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3000         if (ret > 0) {
3001                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3002                         ret = -EINVAL;
3003                 else
3004                         ret = 0;
3005         }
3006         kfree(txmsg);
3007 fail_put:
3008         drm_dp_mst_topology_put_mstb(mstb);
3009         return ret;
3010 }
3011
3012 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3013                                  struct drm_dp_mst_port *port, bool power_up)
3014 {
3015         struct drm_dp_sideband_msg_tx *txmsg;
3016         int ret;
3017
3018         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3019         if (!port)
3020                 return -EINVAL;
3021
3022         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3023         if (!txmsg) {
3024                 drm_dp_mst_topology_put_port(port);
3025                 return -ENOMEM;
3026         }
3027
3028         txmsg->dst = port->parent;
3029         build_power_updown_phy(txmsg, port->port_num, power_up);
3030         drm_dp_queue_down_tx(mgr, txmsg);
3031
3032         ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3033         if (ret > 0) {
3034                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3035                         ret = -EINVAL;
3036                 else
3037                         ret = 0;
3038         }
3039         kfree(txmsg);
3040         drm_dp_mst_topology_put_port(port);
3041
3042         return ret;
3043 }
3044 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
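/*
 * A minimal usage sketch (illustrative, not from a real driver): the remote
 * DPRX is typically powered up before enabling a stream on @port and powered
 * down again after disabling it. Assumes the driver already holds a
 * reference to @port via its connector state:
 *
 *	drm_dp_send_power_updown_phy(mgr, port, true);
 *	// ...enable and run the stream...
 *	drm_dp_send_power_updown_phy(mgr, port, false);
 */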
3045
3046 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3047                                        int id,
3048                                        struct drm_dp_payload *payload)
3049 {
3050         int ret;
3051
3052         ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3053         if (ret < 0) {
3054                 payload->payload_state = 0;
3055                 return ret;
3056         }
3057         payload->payload_state = DP_PAYLOAD_LOCAL;
3058         return 0;
3059 }
3060
3061 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3062                                        struct drm_dp_mst_port *port,
3063                                        int id,
3064                                        struct drm_dp_payload *payload)
3065 {
3066         int ret;
3067         ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3068         if (ret < 0)
3069                 return ret;
3070         payload->payload_state = DP_PAYLOAD_REMOTE;
3071         return ret;
3072 }
3073
3074 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3075                                         struct drm_dp_mst_port *port,
3076                                         int id,
3077                                         struct drm_dp_payload *payload)
3078 {
3079         DRM_DEBUG_KMS("\n");
3080         /* it's okay for these to fail */
3081         if (port)
3082                 drm_dp_payload_send_msg(mgr, port, id, 0);
3084
3085         drm_dp_dpcd_write_payload(mgr, id, payload);
3086         payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3087         return 0;
3088 }
3089
3090 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3091                                         int id,
3092                                         struct drm_dp_payload *payload)
3093 {
3094         payload->payload_state = 0;
3095         return 0;
3096 }
3097
3098 /**
3099  * drm_dp_update_payload_part1() - Execute payload update part 1
3100  * @mgr: manager to use.
3101  *
3102  * This iterates over all proposed virtual channels, and tries to
3103  * allocate space in the link for them. For 0->slots transitions,
3104  * this step just writes the VCPI to the MST device. For slots->0
3105  * transitions, this writes the updated VCPIs and removes the
3106  * remote VC payloads.
3107  *
3108  * After calling this, the driver should generate ACT and payload
3109  * packets.
3110  */
3111 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3112 {
3113         struct drm_dp_payload req_payload;
3114         struct drm_dp_mst_port *port;
3115         int i, j;
3116         int cur_slots = 1;
3117
3118         mutex_lock(&mgr->payload_lock);
3119         for (i = 0; i < mgr->max_payloads; i++) {
3120                 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3121                 struct drm_dp_payload *payload = &mgr->payloads[i];
3122                 bool put_port = false;
3123
3124                 /* resolve the current payloads - compare to the hw ones
3125                    and update the hw view */
3126                 req_payload.start_slot = cur_slots;
3127                 if (vcpi) {
3128                         port = container_of(vcpi, struct drm_dp_mst_port,
3129                                             vcpi);
3130
3131                         /* Validated ports don't matter if we're releasing
3132                          * VCPI
3133                          */
3134                         if (vcpi->num_slots) {
3135                                 port = drm_dp_mst_topology_get_port_validated(
3136                                     mgr, port);
3137                                 if (!port) {
3138                                         mutex_unlock(&mgr->payload_lock);
3139                                         return -EINVAL;
3140                                 }
3141                                 put_port = true;
3142                         }
3143
3144                         req_payload.num_slots = vcpi->num_slots;
3145                         req_payload.vcpi = vcpi->vcpi;
3146                 } else {
3147                         port = NULL;
3148                         req_payload.num_slots = 0;
3149                 }
3150
3151                 payload->start_slot = req_payload.start_slot;
3152                 /* work out what is required to happen with this payload */
3153                 if (payload->num_slots != req_payload.num_slots) {
3155                         /* need to push an update for this payload */
3156                         if (req_payload.num_slots) {
3157                                 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3158                                                             &req_payload);
3159                                 payload->num_slots = req_payload.num_slots;
3160                                 payload->vcpi = req_payload.vcpi;
3161
3162                         } else if (payload->num_slots) {
3163                                 payload->num_slots = 0;
3164                                 drm_dp_destroy_payload_step1(mgr, port,
3165                                                              payload->vcpi,
3166                                                              payload);
3167                                 req_payload.payload_state =
3168                                         payload->payload_state;
3169                                 payload->start_slot = 0;
3170                         }
3171                         payload->payload_state = req_payload.payload_state;
3172                 }
3173                 cur_slots += req_payload.num_slots;
3174
3175                 if (put_port)
3176                         drm_dp_mst_topology_put_port(port);
3177         }
3178
3179         for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3180                 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3181                         i++;
3182                         continue;
3183                 }
3184
3185                 DRM_DEBUG_KMS("removing payload %d\n", i);
3186                 for (j = i; j < mgr->max_payloads - 1; j++) {
3187                         mgr->payloads[j] = mgr->payloads[j + 1];
3188                         mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3189
3190                         if (mgr->proposed_vcpis[j] &&
3191                             mgr->proposed_vcpis[j]->num_slots) {
3192                                 set_bit(j + 1, &mgr->payload_mask);
3193                         } else {
3194                                 clear_bit(j + 1, &mgr->payload_mask);
3195                         }
3196                 }
3197
3198                 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3199                        sizeof(struct drm_dp_payload));
3200                 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3201                 clear_bit(mgr->max_payloads, &mgr->payload_mask);
3202         }
3203         mutex_unlock(&mgr->payload_lock);
3204
3205         return 0;
3206 }
3207 EXPORT_SYMBOL(drm_dp_update_payload_part1);
3208
3209 /**
3210  * drm_dp_update_payload_part2() - Execute payload update part 2
3211  * @mgr: manager to use.
3212  *
3213  * This iterates over all proposed virtual channels, and tries to
3214  * allocate space in the link for them. For 0->slots transitions,
3215  * this step writes the remote VC payload commands. For slots->0
3216  * transitions, this just resets some internal state.
3217  */
3218 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3219 {
3220         struct drm_dp_mst_port *port;
3221         int i;
3222         int ret = 0;
3223         mutex_lock(&mgr->payload_lock);
3224         for (i = 0; i < mgr->max_payloads; i++) {
3226                 if (!mgr->proposed_vcpis[i])
3227                         continue;
3228
3229                 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3230
3231                 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3232                 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3233                         ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3234                 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3235                         ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3236                 }
3237                 if (ret) {
3238                         mutex_unlock(&mgr->payload_lock);
3239                         return ret;
3240                 }
3241         }
3242         mutex_unlock(&mgr->payload_lock);
3243         return 0;
3244 }
3245 EXPORT_SYMBOL(drm_dp_update_payload_part2);
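/*
 * An illustrative sketch of the full payload programming sequence a driver
 * follows around a modeset, using the helpers above (error handling
 * elided; the ACT trigger step is hardware-specific):
 *
 *	drm_dp_update_payload_part1(mgr);	// push VCPI table updates
 *	// ...point the hardware at the new allocation and send ACT...
 *	drm_dp_check_act_status(mgr);		// wait for ACT handled
 *	drm_dp_update_payload_part2(mgr);	// send remote payload msgs
 */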
3246
3247 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3248                                  struct drm_dp_mst_port *port,
3249                                  int offset, int size, u8 *bytes)
3250 {
3251         int ret = 0;
3253         struct drm_dp_sideband_msg_tx *txmsg;
3254         struct drm_dp_mst_branch *mstb;
3255
3256         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3257         if (!mstb)
3258                 return -EINVAL;
3259
3260         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3261         if (!txmsg) {
3262                 ret = -ENOMEM;
3263                 goto fail_put;
3264         }
3265
3266         build_dpcd_read(txmsg, port->port_num, offset, size);
3267         txmsg->dst = mstb;
3268
3269         drm_dp_queue_down_tx(mgr, txmsg);
3270
3271         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3272         if (ret < 0)
3273                 goto fail_free;
3274
3275         /* DPCD read should never be NACKed */
3276         if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3277                 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3278                           mstb, port->port_num, offset, size);
3279                 ret = -EIO;
3280                 goto fail_free;
3281         }
3282
3283         if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3284                 ret = -EPROTO;
3285                 goto fail_free;
3286         }
3287
3288         ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3289                     size);
3290         memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3291
3292 fail_free:
3293         kfree(txmsg);
3294 fail_put:
3295         drm_dp_mst_topology_put_mstb(mstb);
3296
3297         return ret;
3298 }
3299
3300 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3301                                   struct drm_dp_mst_port *port,
3302                                   int offset, int size, u8 *bytes)
3303 {
3304         int ret;
3306         struct drm_dp_sideband_msg_tx *txmsg;
3307         struct drm_dp_mst_branch *mstb;
3308
3309         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3310         if (!mstb)
3311                 return -EINVAL;
3312
3313         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3314         if (!txmsg) {
3315                 ret = -ENOMEM;
3316                 goto fail_put;
3317         }
3318
3319         build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3320         txmsg->dst = mstb;
3321
3322         drm_dp_queue_down_tx(mgr, txmsg);
3323
3324         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3325         if (ret > 0) {
3326                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3327                         ret = -EIO;
3328                 else
3329                         ret = 0;
3330         }
3331         kfree(txmsg);
3332 fail_put:
3333         drm_dp_mst_topology_put_mstb(mstb);
3334         return ret;
3335 }
3336
3337 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3338 {
3339         struct drm_dp_sideband_msg_reply_body reply;
3340
3341         reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3342         reply.req_type = req_type;
3343         drm_dp_encode_sideband_reply(&reply, msg);
3344         return 0;
3345 }
3346
3347 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3348                                     struct drm_dp_mst_branch *mstb,
3349                                     int req_type, int seqno, bool broadcast)
3350 {
3351         struct drm_dp_sideband_msg_tx *txmsg;
3352
3353         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3354         if (!txmsg)
3355                 return -ENOMEM;
3356
3357         txmsg->dst = mstb;
3358         txmsg->seqno = seqno;
3359         drm_dp_encode_up_ack_reply(txmsg, req_type);
3360
3361         mutex_lock(&mgr->qlock);
3362
3363         process_single_up_tx_qlock(mgr, txmsg);
3364
3365         mutex_unlock(&mgr->qlock);
3366
3367         kfree(txmsg);
3368         return 0;
3369 }
3370
3371 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
3372 {
3373         if (dp_link_bw == 0 || dp_link_count == 0)
3374                 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3375                               dp_link_bw, dp_link_count);
3376
3377         return dp_link_bw * dp_link_count / 2;
3378 }
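/*
 * Worked example: the raw DPCD link rate code counts in 0.27 Gbps units
 * (0x0a = 2.7 Gbps/HBR, 0x14 = 5.4 Gbps/HBR2), so an HBR2 link with 4
 * lanes yields 0x14 * 4 / 2 = 40 PBN per time slot. This value becomes
 * mgr->pbn_div in drm_dp_mst_topology_mgr_set_mst() below.
 */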
3379
3380 /**
3381  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3382  * @mgr: manager to set state for
3383  * @mst_state: true to enable MST on this connector - false to disable.
3384  *
3385  * This is called by the driver when it detects an MST capable device plugged
3386  * into a DP MST capable port, or when a DP MST capable device is unplugged.
3387  */
3388 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3389 {
3390         int ret = 0;
3391         struct drm_dp_mst_branch *mstb = NULL;
3392
3393         mutex_lock(&mgr->lock);
3394         if (mst_state == mgr->mst_state)
3395                 goto out_unlock;
3396
3397         mgr->mst_state = mst_state;
3398         /* set the device into MST mode */
3399         if (mst_state) {
3400                 WARN_ON(mgr->mst_primary);
3401
3402                 /* get dpcd info */
3403                 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3404                 if (ret != DP_RECEIVER_CAP_SIZE) {
3405                         DRM_DEBUG_KMS("failed to read DPCD\n");
3406                         goto out_unlock;
3407                 }
3408
3409                 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3410                                                         mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3411                 if (mgr->pbn_div == 0) {
3412                         ret = -EINVAL;
3413                         goto out_unlock;
3414                 }
3415
3416                 /* add initial branch device at LCT 1 */
3417                 mstb = drm_dp_add_mst_branch_device(1, NULL);
3418                 if (!mstb) {
3419                         ret = -ENOMEM;
3420                         goto out_unlock;
3421                 }
3422                 mstb->mgr = mgr;
3423
3424                 /* give this the main reference */
3425                 mgr->mst_primary = mstb;
3426                 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3427
3428                 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3429                                          DP_MST_EN | DP_UP_REQ_EN |
3430                                          DP_UPSTREAM_IS_SRC);
3431                 if (ret < 0)
3432                         goto out_unlock;
3433
3434                 {
3435                         /* clear the payload table on the branch device */
3436                         struct drm_dp_payload reset_pay = {
3437                                 .start_slot = 0, .num_slots = 0x3f };
3438                         drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3439                 }
3440
3441                 queue_work(system_long_wq, &mgr->work);
3442
3443                 ret = 0;
3444         } else {
3445                 /* disable MST on the device */
3446                 mstb = mgr->mst_primary;
3447                 mgr->mst_primary = NULL;
3448                 /* this can fail if the device is gone */
3449                 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3450                 ret = 0;
3451                 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3452                 mgr->payload_mask = 0;
3453                 set_bit(0, &mgr->payload_mask);
3454                 mgr->vcpi_mask = 0;
3455         }
3456
3457 out_unlock:
3458         mutex_unlock(&mgr->lock);
3459         if (mstb)
3460                 drm_dp_mst_topology_put_mstb(mstb);
3461         return ret;
3463 }
3464 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
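/*
 * A sketch of how a driver typically flips MST mode from its long-pulse
 * hotplug handler. The detection of an MST-capable sink (mst_capable
 * below) is driver-specific and hypothetical here; mgr is assumed to have
 * been set up with drm_dp_mst_topology_mgr_init() at driver load:
 *
 *	if (mst_capable && !mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else if (!mst_capable && mgr->mst_state)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */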
3465
3466 static void
3467 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3468 {
3469         struct drm_dp_mst_port *port;
3470
3471         /* The link address will need to be re-sent on resume */
3472         mstb->link_address_sent = false;
3473
3474         list_for_each_entry(port, &mstb->ports, next) {
3475                 /* The PBN for each port will also need to be re-probed */
3476                 port->available_pbn = 0;
3477
3478                 if (port->mstb)
3479                         drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3480         }
3481 }
3482
3483 /**
3484  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3485  * @mgr: manager to suspend
3486  *
3487  * This function tells the MST device that we can't handle UP messages
3488  * anymore. This should stop it from sending any since we are suspended.
3489  */
3490 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3491 {
3492         mutex_lock(&mgr->lock);
3493         drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3494                            DP_MST_EN | DP_UPSTREAM_IS_SRC);
3495         mutex_unlock(&mgr->lock);
3496         flush_work(&mgr->up_req_work);
3497         flush_work(&mgr->work);
3498         flush_work(&mgr->delayed_destroy_work);
3499
3500         mutex_lock(&mgr->lock);
3501         if (mgr->mst_state && mgr->mst_primary)
3502                 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3503         mutex_unlock(&mgr->lock);
3504 }
3505 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3506
3507 /**
3508  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3509  * @mgr: manager to resume
3510  * @sync: whether or not to perform topology reprobing synchronously
3511  *
3512  * This will fetch the DPCD and check whether the device is still there;
3513  * if it is, it will rewrite the MSTM control bits, and return.
3514  *
3515  * If the device is gone, this returns -1, and the driver should do a
3516  * full MST reprobe, in case it was undocked.
3517  *
3518  * During system resume (where it is assumed that the driver will be calling
3519  * drm_atomic_helper_resume()) this function should be called beforehand with
3520  * @sync set to true. In contexts like runtime resume where the driver is not
3521  * expected to be calling drm_atomic_helper_resume(), this function should be
3522  * called with @sync set to false in order to avoid deadlocking.
3523  *
3524  * Returns: -1 if the MST topology was removed while we were suspended, 0
3525  * otherwise.
3526  */
3527 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3528                                    bool sync)
3529 {
3530         int ret;
3531         u8 guid[16];
3532
3533         mutex_lock(&mgr->lock);
3534         if (!mgr->mst_primary)
3535                 goto out_fail;
3536
3537         ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3538                                DP_RECEIVER_CAP_SIZE);
3539         if (ret != DP_RECEIVER_CAP_SIZE) {
3540                 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3541                 goto out_fail;
3542         }
3543
3544         ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3545                                  DP_MST_EN |
3546                                  DP_UP_REQ_EN |
3547                                  DP_UPSTREAM_IS_SRC);
3548         if (ret < 0) {
3549                 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3550                 goto out_fail;
3551         }
3552
3553         /* Some hubs forget their guids after they resume */
3554         ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3555         if (ret != 16) {
3556                 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3557                 goto out_fail;
3558         }
3559         drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3560
3561         /*
3562          * For the final step of resuming the topology, we need to bring the
3563          * state of our in-memory topology back into sync with reality. So,
3564          * restart the probing process as if we're probing a new hub
3565          */
3566         queue_work(system_long_wq, &mgr->work);
3567         mutex_unlock(&mgr->lock);
3568
3569         if (sync) {
3570                 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3571                 flush_work(&mgr->work);
3572         }
3573
3574         return 0;
3575
3576 out_fail:
3577         mutex_unlock(&mgr->lock);
3578         return -1;
3579 }
3580 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
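/*
 * A sketch of the expected suspend/resume pairing (illustrative only):
 *
 *	// system suspend path
 *	drm_dp_mst_topology_mgr_suspend(mgr);
 *
 *	// system resume path, before drm_atomic_helper_resume()
 *	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0) {
 *		// topology went away while suspended; tear down MST
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *	}
 */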
3581
3582 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3583 {
3584         int len;
3585         u8 replyblock[32];
3586         int replylen, origlen, curreply;
3587         int ret;
3588         struct drm_dp_sideband_msg_rx *msg;
3589         int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3590         msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3591
3592         len = min(mgr->max_dpcd_transaction_bytes, 16);
3593         ret = drm_dp_dpcd_read(mgr->aux, basereg,
3594                                replyblock, len);
3595         if (ret != len) {
3596                 DRM_DEBUG_KMS("failed to read DPCD sideband msg %d %d\n", len, ret);
3597                 return false;
3598         }
3599         ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3600         if (!ret) {
3601                 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3602                 return false;
3603         }
3604         replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3605
3606         origlen = replylen;
3607         replylen -= len;
3608         curreply = len;
3609         while (replylen > 0) {
3610                 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3611                 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3612                                     replyblock, len);
3613                 if (ret != len) {
3614                         DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3615                                       len, ret);
3616                         return false;
3617                 }
3618
3619                 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3620                 if (!ret) {
3621                         DRM_DEBUG_KMS("failed to build sideband msg\n");
3622                         return false;
3623                 }
3624
3625                 curreply += len;
3626                 replylen -= len;
3627         }
3628         return true;
3629 }
3630
3631 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3632 {
3633         struct drm_dp_sideband_msg_tx *txmsg;
3634         struct drm_dp_mst_branch *mstb;
3635         struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3636         int slot = -1;
3637
3638         if (!drm_dp_get_one_sb_msg(mgr, false))
3639                 goto clear_down_rep_recv;
3640
3641         if (!mgr->down_rep_recv.have_eomt)
3642                 return 0;
3643
3644         mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3645         if (!mstb) {
3646                 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3647                               hdr->lct);
3648                 goto clear_down_rep_recv;
3649         }
3650
3651         /* find the message */
3652         slot = hdr->seqno;
3653         mutex_lock(&mgr->qlock);
3654         txmsg = mstb->tx_slots[slot];
3655         /* remove from slots */
3656         mutex_unlock(&mgr->qlock);
3657
3658         if (!txmsg) {
3659                 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3660                               mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3661                               mgr->down_rep_recv.msg[0]);
3662                 goto no_msg;
3663         }
3664
3665         drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3666
3667         if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3668                 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3669                               txmsg->reply.req_type,
3670                               drm_dp_mst_req_type_str(txmsg->reply.req_type),
3671                               txmsg->reply.u.nak.reason,
3672                               drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3673                               txmsg->reply.u.nak.nak_data);
3674
3675         memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3676
3677         mutex_lock(&mgr->qlock);
3678         txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3679         mstb->tx_slots[slot] = NULL;
3680         mutex_unlock(&mgr->qlock);
3681
3682         /* only drop the mstb reference once tx_slots has been updated */
3683         drm_dp_mst_topology_put_mstb(mstb);
3684         wake_up_all(&mgr->tx_waitq);
3685         return 0;
3686
3687 no_msg:
3688         drm_dp_mst_topology_put_mstb(mstb);
3689 clear_down_rep_recv:
3690         memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3691
3692         return 0;
3693 }
3694
3695 static inline bool
3696 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3697                           struct drm_dp_pending_up_req *up_req)
3698 {
3699         struct drm_dp_mst_branch *mstb = NULL;
3700         struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3701         struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3702         bool hotplug = false;
3703
3704         if (hdr->broadcast) {
3705                 const u8 *guid = NULL;
3706
3707                 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3708                         guid = msg->u.conn_stat.guid;
3709                 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3710                         guid = msg->u.resource_stat.guid;
3711
3712                 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3713         } else {
3714                 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3715         }
3716
3717         if (!mstb) {
3718                 DRM_DEBUG_KMS("Got MST up req from unknown device %d\n",
3719                               hdr->lct);
3720                 return false;
3721         }
3722
3723         /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3724         if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3725                 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3726                 hotplug = true;
3727         }
3728
3729         drm_dp_mst_topology_put_mstb(mstb);
3730         return hotplug;
3731 }
3732
3733 static void drm_dp_mst_up_req_work(struct work_struct *work)
3734 {
3735         struct drm_dp_mst_topology_mgr *mgr =
3736                 container_of(work, struct drm_dp_mst_topology_mgr,
3737                              up_req_work);
3738         struct drm_dp_pending_up_req *up_req;
3739         bool send_hotplug = false;
3740
3741         mutex_lock(&mgr->probe_lock);
3742         while (true) {
3743                 mutex_lock(&mgr->up_req_lock);
3744                 up_req = list_first_entry_or_null(&mgr->up_req_list,
3745                                                   struct drm_dp_pending_up_req,
3746                                                   next);
3747                 if (up_req)
3748                         list_del(&up_req->next);
3749                 mutex_unlock(&mgr->up_req_lock);
3750
3751                 if (!up_req)
3752                         break;
3753
3754                 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3755                 kfree(up_req);
3756         }
3757         mutex_unlock(&mgr->probe_lock);
3758
3759         if (send_hotplug)
3760                 drm_kms_helper_hotplug_event(mgr->dev);
3761 }
3762
3763 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3764 {
3765         struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3766         struct drm_dp_pending_up_req *up_req;
3767         bool seqno;
3768
3769         if (!drm_dp_get_one_sb_msg(mgr, true))
3770                 goto out;
3771
3772         if (!mgr->up_req_recv.have_eomt)
3773                 return 0;
3774
3775         up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3776         if (!up_req) {
3777                 DRM_ERROR("Not enough memory to process MST up req\n");
3778                 return -ENOMEM;
3779         }
3780         INIT_LIST_HEAD(&up_req->next);
3781
3782         seqno = hdr->seqno;
3783         drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3784
3785         if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3786             up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3787                 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3788                               up_req->msg.req_type);
3789                 kfree(up_req);
3790                 goto out;
3791         }
3792
3793         drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3794                                  seqno, false);
3795
3796         if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3797                 const struct drm_dp_connection_status_notify *conn_stat =
3798                         &up_req->msg.u.conn_stat;
3799
3800                 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3801                               conn_stat->port_number,
3802                               conn_stat->legacy_device_plug_status,
3803                               conn_stat->displayport_device_plug_status,
3804                               conn_stat->message_capability_status,
3805                               conn_stat->input_port,
3806                               conn_stat->peer_device_type);
3807         } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3808                 const struct drm_dp_resource_status_notify *res_stat =
3809                         &up_req->msg.u.resource_stat;
3810
3811                 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3812                               res_stat->port_number,
3813                               res_stat->available_pbn);
3814         }
3815
3816         up_req->hdr = *hdr;
3817         mutex_lock(&mgr->up_req_lock);
3818         list_add_tail(&up_req->next, &mgr->up_req_list);
3819         mutex_unlock(&mgr->up_req_lock);
3820         queue_work(system_long_wq, &mgr->up_req_work);
3821
3822 out:
3823         memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3824         return 0;
3825 }
3826
3827 /**
3828  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3829  * @mgr: manager to notify irq for.
3830  * @esi: 4 bytes from SINK_COUNT_ESI
3831  * @handled: whether the hpd interrupt was consumed or not
3832  *
3833  * This should be called from the driver when it detects a short IRQ,
3834  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3835  * topology manager will process the sideband messages received as a result
3836  * of this.
3837  */
3838 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3839 {
3840         int ret = 0;
3841         int sc;
3842         *handled = false;
3843         sc = esi[0] & 0x3f;
3844
3845         if (sc != mgr->sink_count) {
3846                 mgr->sink_count = sc;
3847                 *handled = true;
3848         }
3849
3850         if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3851                 ret = drm_dp_mst_handle_down_rep(mgr);
3852                 *handled = true;
3853         }
3854
3855         if (esi[1] & DP_UP_REQ_MSG_RDY) {
3856                 ret |= drm_dp_mst_handle_up_req(mgr);
3857                 *handled = true;
3858         }
3859
3860         drm_dp_mst_kick_tx(mgr);
3861         return ret;
3862 }
3863 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
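/*
 * An illustrative short-pulse IRQ handler loop, modeled on what drivers
 * typically do; "aux" is the driver's AUX channel, and the 14-byte ESI
 * read plus the ack write-back follow the DP 1.2 ESI register layout:
 *
 *	u8 esi[14];
 *	bool handled;
 *
 *	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) == 14) {
 *		drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *		if (handled)	// ack the request bits we serviced
 *			drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1,
 *					  &esi[1], 3);
 *	}
 */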
3864
3865 /**
3866  * drm_dp_mst_detect_port() - get connection status for an MST port
3867  * @connector: DRM connector for this port
3868  * @ctx: The acquisition context to use for grabbing locks
3869  * @mgr: manager for this port
3870  * @port: pointer to a port
3871  *
3872  * This returns the current connection state for a port.
3873  */
3874 int
3875 drm_dp_mst_detect_port(struct drm_connector *connector,
3876                        struct drm_modeset_acquire_ctx *ctx,
3877                        struct drm_dp_mst_topology_mgr *mgr,
3878                        struct drm_dp_mst_port *port)
3879 {
3880         int ret;
3881
3882         /* we need to search for the port in the mgr in case it's gone */
3883         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3884         if (!port)
3885                 return connector_status_disconnected;
3886
3887         ret = drm_modeset_lock(&mgr->base.lock, ctx);
3888         if (ret)
3889                 goto out;
3890
3891         ret = connector_status_disconnected;
3892
3893         if (!port->ddps)
3894                 goto out;
3895
3896         switch (port->pdt) {
3897         case DP_PEER_DEVICE_NONE:
3898         case DP_PEER_DEVICE_MST_BRANCHING:
3899                 break;
3900
3901         case DP_PEER_DEVICE_SST_SINK:
3902                 ret = connector_status_connected;
3903                 /* for logical ports - cache the EDID */
3904                 if (port->port_num >= DP_MST_LOGICAL_PORT_0 &&
3905                     !port->cached_edid)
3906                         port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3907                 break;
3908         case DP_PEER_DEVICE_DP_LEGACY_CONV:
3909                 if (port->ldps)
3910                         ret = connector_status_connected;
3911                 break;
3912         }
3913 out:
3914         drm_dp_mst_topology_put_port(port);
3915         return ret;
3916 }
3917 EXPORT_SYMBOL(drm_dp_mst_detect_port);
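/*
 * A sketch of a connector ->detect_ctx hook built on this helper; the
 * example_priv structure and its mgr/port fields are hypothetical driver
 * state:
 *
 *	static int example_detect_ctx(struct drm_connector *connector,
 *				      struct drm_modeset_acquire_ctx *ctx,
 *				      bool force)
 *	{
 *		struct example_priv *priv = to_example_priv(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, priv->mgr,
 *					      priv->port);
 *	}
 */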
3918
3919 /**
3920  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3921  * @mgr: manager for this port
3922  * @port: unverified pointer to a port.
3923  *
3924  * This returns whether the port supports audio or not.
3925  */
3926 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3927                                         struct drm_dp_mst_port *port)
3928 {
3929         bool ret = false;
3930
3931         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3932         if (!port)
3933                 return ret;
3934         ret = port->has_audio;
3935         drm_dp_mst_topology_put_port(port);
3936         return ret;
3937 }
3938 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3939
3940 /**
3941  * drm_dp_mst_get_edid() - get EDID for an MST port
3942  * @connector: toplevel connector to get EDID for
3943  * @mgr: manager for this port
3944  * @port: unverified pointer to a port.
3945  *
3946  * This returns an EDID for the port connected to a connector.
3947  * It validates that the pointer still exists, so the caller doesn't
3948  * require a reference.
3949  */
3950 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3951 {
3952         struct edid *edid = NULL;
3953
3954         /* we need to search for the port in the mgr in case it's gone */
3955         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3956         if (!port)
3957                 return NULL;
3958
3959         if (port->cached_edid)
3960                 edid = drm_edid_duplicate(port->cached_edid);
3961         else
3962                 edid = drm_get_edid(connector, &port->aux.ddc);
3964         port->has_audio = drm_detect_monitor_audio(edid);
3965         drm_dp_mst_topology_put_port(port);
3966         return edid;
3967 }
3968 EXPORT_SYMBOL(drm_dp_mst_get_edid);
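/*
 * A sketch of a connector ->get_modes() implementation using this helper
 * (priv->mgr and priv->port are hypothetical driver state):
 *
 *	struct edid *edid = drm_dp_mst_get_edid(connector, priv->mgr,
 *						priv->port);
 *	int count = 0;
 *
 *	if (edid) {
 *		drm_connector_update_edid_property(connector, edid);
 *		count = drm_add_edid_modes(connector, edid);
 *		kfree(edid);
 *	}
 *	return count;
 */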
3969
3970 /**
3971  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3972  * @mgr: manager to use
3973  * @pbn: payload bandwidth to convert into slots.
3974  *
3975  * Calculate the number of VCPI slots that will be required for the given PBN
3976  * value. This function is deprecated, and should not be used in atomic
3977  * drivers.
3978  *
3979  * RETURNS:
3980  * The total slots required for this port, or error.
3981  */
3982 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3983                            int pbn)
3984 {
3985         int num_slots;
3986
3987         num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3988
3989         /* max. time slots - one slot for MTP header */
3990         if (num_slots > 63)
3991                 return -ENOSPC;
3992         return num_slots;
3993 }
3994 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
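/*
 * Worked example: with mgr->pbn_div == 40 (an HBR2 x4 link), a mode
 * needing 1000 PBN takes DIV_ROUND_UP(1000, 40) == 25 time slots, while
 * anything above 63 * 40 == 2520 PBN exceeds the link and yields -ENOSPC.
 */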
3995
3996 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3997                             struct drm_dp_vcpi *vcpi, int pbn, int slots)
3998 {
3999         int ret;
4000
4001         /* max. time slots - one slot for MTP header */
4002         if (slots > 63)
4003                 return -ENOSPC;
4004
4005         vcpi->pbn = pbn;
4006         vcpi->aligned_pbn = slots * mgr->pbn_div;
4007         vcpi->num_slots = slots;
4008
4009         ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4010         if (ret < 0)
4011                 return ret;
4012         return 0;
4013 }
4014
4015 /**
4016  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4017  * @state: global atomic state
4018  * @mgr: MST topology manager for the port
4019  * @port: port to find vcpi slots for
4020  * @pbn: bandwidth required for the mode in PBN
4021  *
4022  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4023  * may have had. Any atomic drivers which support MST must call this function
4024  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4025  * current VCPI allocation for the new state, but only when
4026  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4027  * to ensure compatibility with userspace applications that still use the
4028  * legacy modesetting UAPI.
4029  *
4030  * Allocations set by this function are not checked against the bandwidth
4031  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4032  *
4033  * Additionally, it is OK to call this function multiple times on the same
4034  * @port as needed. It is not OK however, to call this function and
4035  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4036  *
4037  * See also:
4038  * drm_dp_atomic_release_vcpi_slots()
4039  * drm_dp_mst_atomic_check()
4040  *
4041  * Returns:
4042  * Total slots in the atomic state assigned for this port, or a negative error
4043  * code if the port no longer exists
4044  */
4045 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4046                                   struct drm_dp_mst_topology_mgr *mgr,
4047                                   struct drm_dp_mst_port *port, int pbn)
4048 {
4049         struct drm_dp_mst_topology_state *topology_state;
4050         struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4051         int prev_slots, req_slots;
4052
4053         topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4054         if (IS_ERR(topology_state))
4055                 return PTR_ERR(topology_state);
4056
4057         /* Find the current allocation for this port, if any */
4058         list_for_each_entry(pos, &topology_state->vcpis, next) {
4059                 if (pos->port == port) {
4060                         vcpi = pos;
4061                         prev_slots = vcpi->vcpi;
4062
4063                         /*
4064                          * This should never happen, unless the driver tries
4065                          * releasing and allocating the same VCPI allocation,
4066                          * which is an error
4067                          */
4068                         if (WARN_ON(!prev_slots)) {
4069                                 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4070                                           port);
4071                                 return -EINVAL;
4072                         }
4073
4074                         break;
4075                 }
4076         }
4077         if (!vcpi)
4078                 prev_slots = 0;
4079
4080         req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4081
4082         DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4083                          port->connector->base.id, port->connector->name,
4084                          port, prev_slots, req_slots);
4085
4086         /* Add the new allocation to the state */
4087         if (!vcpi) {
4088                 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4089                 if (!vcpi)
4090                         return -ENOMEM;
4091
4092                 drm_dp_mst_get_port_malloc(port);
4093                 vcpi->port = port;
4094                 list_add(&vcpi->next, &topology_state->vcpis);
4095         }
4096         vcpi->vcpi = req_slots;
4097
4098         return req_slots;
4099 }
4100 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
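/*
 * A sketch of the atomic_check-time allocation. It assumes the
 * two-argument drm_dp_calc_pbn_mode() found in this tree; crtc_state,
 * adjusted_mode, bpp and priv are hypothetical driver state, and error
 * handling is abbreviated:
 *
 *	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
 *		int pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
 *					       bpp);
 *		int slots = drm_dp_atomic_find_vcpi_slots(state, priv->mgr,
 *							  priv->port, pbn);
 *
 *		if (slots < 0)
 *			return slots;
 *	}
 */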
4101
4102 /**
4103  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4104  * @state: global atomic state
4105  * @mgr: MST topology manager for the port
4106  * @port: The port to release the VCPI slots from
4107  *
4108  * Releases any VCPI slots that have been allocated to a port in the atomic
4109  * state. Any atomic drivers which support MST must call this function in
4110  * their &drm_connector_helper_funcs.atomic_check() callback when the
4111  * connector will no longer have VCPI allocated (e.g. because its CRTC was
4112  * removed) when it had VCPI allocated in the previous atomic state.
4113  *
4114  * It is OK to call this even if @port has been removed from the system.
4115  * Additionally, it is OK to call this function multiple times on the same
4116  * @port as needed. It is not OK however, to call this function and
4117  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4118  * phase.
4119  *
4120  * See also:
4121  * drm_dp_atomic_find_vcpi_slots()
4122  * drm_dp_mst_atomic_check()
4123  *
4124  * Returns:
4125  * 0 if all slots for this port were added back to
4126  * &drm_dp_mst_topology_state.avail_slots or negative error code
4127  */
4128 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4129                                      struct drm_dp_mst_topology_mgr *mgr,
4130                                      struct drm_dp_mst_port *port)
4131 {
4132         struct drm_dp_mst_topology_state *topology_state;
4133         struct drm_dp_vcpi_allocation *pos;
4134         bool found = false;
4135
4136         topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4137         if (IS_ERR(topology_state))
4138                 return PTR_ERR(topology_state);
4139
4140         list_for_each_entry(pos, &topology_state->vcpis, next) {
4141                 if (pos->port == port) {
4142                         found = true;
4143                         break;
4144                 }
4145         }
4146         if (WARN_ON(!found)) {
4147                 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4148                           port, &topology_state->base);
4149                 return -EINVAL;
4150         }
4151
4152         DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4153         if (pos->vcpi) {
4154                 drm_dp_mst_put_port_malloc(port);
4155                 pos->vcpi = 0;
4156         }
4157
4158         return 0;
4159 }
4160 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
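/*
 * A sketch of the matching release from a connector's atomic_check when
 * the connector loses its CRTC (hypothetical driver state; compare the
 * allocation sketch above):
 *
 *	if (old_conn_state->crtc && !new_conn_state->crtc)
 *		return drm_dp_atomic_release_vcpi_slots(state, priv->mgr,
 *							priv->port);
 */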
4161
4162 /**
4163  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4164  * @mgr: manager for this port
4165  * @port: port to allocate a virtual channel for.
4166  * @pbn: payload bandwidth number to request
4167  * @slots: returned number of slots for this PBN.
4168  */
4169 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4170                               struct drm_dp_mst_port *port, int pbn, int slots)
4171 {
4172         int ret;
4173
4174         if (slots < 0)
4175                 return false;
4176
4177         port = drm_dp_mst_topology_get_port_validated(mgr, port);
4178         if (!port)
4179                 return false;
4180
4181         if (port->vcpi.vcpi > 0) {
4182                 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4183                               port->vcpi.vcpi, port->vcpi.pbn, pbn);
4184                 if (pbn == port->vcpi.pbn) {
4185                         drm_dp_mst_topology_put_port(port);
4186                         return true;
4187                 }
4188         }
4189
4190         ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4191         if (ret) {
4192                 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", slots, ret);
4193                 drm_dp_mst_topology_put_port(port);
4194                 goto out;
4195         }
4196         DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4197                       pbn, port->vcpi.num_slots);
4198
4199         /* Keep port allocated until its payload has been removed */
4200         drm_dp_mst_get_port_malloc(port);
4201         drm_dp_mst_topology_put_port(port);
4202         return true;
4203 out:
4204         return false;
4205 }
4206 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
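/*
 * An illustrative enable-time sequence tying VCPI allocation to the
 * payload update helpers above (legacy, non-atomic flow; error checks
 * elided):
 *
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);
 *	// ...enable the stream and wait for ACT...
 *	drm_dp_update_payload_part2(mgr);
 */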
4207
4208 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4209 {
4210         int slots = 0;
4211         port = drm_dp_mst_topology_get_port_validated(mgr, port);
4212         if (!port)
4213                 return slots;
4214
4215         slots = port->vcpi.num_slots;
4216         drm_dp_mst_topology_put_port(port);
4217         return slots;
4218 }
4219 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4220
4221 /**
4222  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4223  * @mgr: manager for this port
4224  * @port: unverified pointer to a port.
4225  *
 * This just resets the number of slots for the port's VCPI for later programming.
4227  */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port)
4229 {
4230         /*
4231          * A port with VCPI will remain allocated until its VCPI is
4232          * released, no verified ref needed
4233          */
4234
4235         port->vcpi.num_slots = 0;
4236 }
4237 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4238
4239 /**
4240  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4241  * @mgr: manager for this port
4242  * @port: port to deallocate vcpi for
4243  *
4244  * This can be called unconditionally, regardless of whether
4245  * drm_dp_mst_allocate_vcpi() succeeded or not.
4246  */
4247 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4248                                 struct drm_dp_mst_port *port)
4249 {
4250         if (!port->vcpi.vcpi)
4251                 return;
4252
4253         drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4254         port->vcpi.num_slots = 0;
4255         port->vcpi.pbn = 0;
4256         port->vcpi.aligned_pbn = 0;
4257         port->vcpi.vcpi = 0;
4258         drm_dp_mst_put_port_malloc(port);
4259 }
4260 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
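
/*
 * Example (hypothetical, illustrative only): the usual teardown sequence in
 * a driver's disable path zeroes the slot count, updates the payload table,
 * and only then frees the channel:
 *
 *      drm_dp_mst_reset_vcpi_slots(mgr, port);
 *      drm_dp_update_payload_part1(mgr);
 *      ... disable the stream and wait for the ACT ...
 *      drm_dp_mst_deallocate_vcpi(mgr, port);
 */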
4261
4262 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4263                                      int id, struct drm_dp_payload *payload)
4264 {
4265         u8 payload_alloc[3], status;
4266         int ret;
4267         int retries = 0;
4268
4269         drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4270                            DP_PAYLOAD_TABLE_UPDATED);
4271
4272         payload_alloc[0] = id;
4273         payload_alloc[1] = payload->start_slot;
4274         payload_alloc[2] = payload->num_slots;
4275
4276         ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4277         if (ret != 3) {
4278                 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4279                 goto fail;
4280         }
4281
4282 retry:
4283         ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4284         if (ret < 0) {
4285                 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4286                 goto fail;
4287         }
4288
4289         if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4290                 retries++;
4291                 if (retries < 20) {
4292                         usleep_range(10000, 20000);
4293                         goto retry;
4294                 }
                DRM_DEBUG_KMS("payload table not updated after %d retries, status 0x%02x\n",
                              retries, status);
4296                 ret = -EINVAL;
4297                 goto fail;
4298         }
4299         ret = 0;
4300 fail:
4301         return ret;
4302 }
4305 /**
4306  * drm_dp_check_act_status() - Check ACT handled status.
4307  * @mgr: manager to use
4308  *
 * Check the payload status bits in the DPCD for ACT handled completion.
 *
 * Returns: 0 once the ACT handled bit is set, or a negative error code if
 * it is not set within the retry budget or the status read fails.
 */
4311 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4312 {
4313         u8 status;
4314         int ret;
4315         int count = 0;
4316
4317         do {
4318                 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4319
4320                 if (ret < 0) {
4321                         DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4322                         goto fail;
4323                 }
4324
4325                 if (status & DP_PAYLOAD_ACT_HANDLED)
4326                         break;
4327                 count++;
4328                 udelay(100);
4329
4330         } while (count < 30);
4331
4332         if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
                DRM_DEBUG_KMS("failed to get ACT bit, status 0x%02x after %d retries\n",
                              status, count);
4334                 ret = -EINVAL;
4335                 goto fail;
4336         }
4337         return 0;
4338 fail:
4339         return ret;
4340 }
4341 EXPORT_SYMBOL(drm_dp_check_act_status);
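
/*
 * Example (hypothetical, hw-specific steps elided): a driver's modeset path
 * typically waits for the ACT between the two payload table update steps:
 *
 *      drm_dp_update_payload_part1(mgr);
 *      ... trigger the ACT sequence in hw ...
 *      if (drm_dp_check_act_status(mgr) == 0)
 *              drm_dp_update_payload_part2(mgr);
 */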
4342
4343 /**
4344  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode, in kHz
 * @bpp: bits per pixel for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 *
 * Returns: the PBN for the mode.
 */
4350 int drm_dp_calc_pbn_mode(int clock, int bpp)
4351 {
        /*
         * margin 5300ppm + 300ppm ~ 0.6% as per spec, so the factor is 1.006.
         * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
         * common multiplier that yields an integer PBN for all link rate and
         * lane count combinations.
         *
         * The calculation below is therefore:
         *   PBN = clock * bpp / 8 * (1006/1000) * (64/54) / 1000
         * i.e. the peak rate in kBytes/sec with the 0.6% margin applied,
         * expressed in 54/64 MBytes/sec units.
         */
4362         return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4363                                 8 * 54 * 1000 * 1000);
4364 }
4365 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
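
/*
 * Worked example: a 1080p@60 mode with a 148500 kHz dot clock at 24 bpp:
 *
 *      drm_dp_calc_pbn_mode(148500, 24)
 *              = DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000)
 *              = DIV_ROUND_UP(229464576000, 432000000)
 *              = 532 PBN
 */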
4366
/* we want to kick the TX after we've acked the up/down IRQs. */
4368 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4369 {
4370         queue_work(system_long_wq, &mgr->tx_work);
4371 }
4372
4373 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4374                                  struct drm_dp_mst_branch *mstb)
4375 {
        struct drm_dp_mst_port *port;
        int tabs = mstb->lct;
        char prefix[10];
        int i;

        /* LCT can be up to 15, which would overflow prefix[]; clamp it */
        if (tabs >= (int)sizeof(prefix))
                tabs = sizeof(prefix) - 1;

        for (i = 0; i < tabs; i++)
                prefix[i] = '\t';
        prefix[i] = '\0';
4384
4385         seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4386         list_for_each_entry(port, &mstb->ports, next) {
                seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
                           prefix, port->port_num, port->input, port->pdt,
                           port->ddps, port->ldps, port->num_sdp_streams,
                           port->num_sdp_stream_sinks, port, port->connector);
4388                 if (port->mstb)
4389                         drm_dp_mst_dump_mstb(m, port->mstb);
4390         }
4391 }
4392
4393 #define DP_PAYLOAD_TABLE_SIZE           64
4394
4395 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4396                                   char *buf)
4397 {
4398         int i;
4399
4400         for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4401                 if (drm_dp_dpcd_read(mgr->aux,
4402                                      DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4403                                      &buf[i], 16) != 16)
4404                         return false;
4405         }
4406         return true;
4407 }
4408
4409 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4410                                struct drm_dp_mst_port *port, char *name,
4411                                int namelen)
4412 {
4413         struct edid *mst_edid;
4414
4415         mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4416         drm_edid_get_monitor_name(mst_edid, name, namelen);
4417 }
4418
4419 /**
 * drm_dp_mst_dump_topology() - dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * Helper to dump the MST topology to a seq file for debugfs.
4425  */
4426 void drm_dp_mst_dump_topology(struct seq_file *m,
4427                               struct drm_dp_mst_topology_mgr *mgr)
4428 {
4429         int i;
4430         struct drm_dp_mst_port *port;
4431
4432         mutex_lock(&mgr->lock);
4433         if (mgr->mst_primary)
4434                 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4435
4436         /* dump VCPIs */
4437         mutex_unlock(&mgr->lock);
4438
4439         mutex_lock(&mgr->payload_lock);
4440         seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4441                 mgr->max_payloads);
4442
4443         for (i = 0; i < mgr->max_payloads; i++) {
4444                 if (mgr->proposed_vcpis[i]) {
4445                         char name[14];
4446
4447                         port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4448                         fetch_monitor_name(mgr, port, name, sizeof(name));
4449                         seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4450                                    port->port_num, port->vcpi.vcpi,
4451                                    port->vcpi.num_slots,
                                   (*name != 0) ? name : "Unknown");
                } else {
                        seq_printf(m, "vcpi %d: unused\n", i);
                }
        }
4456         for (i = 0; i < mgr->max_payloads; i++) {
4457                 seq_printf(m, "payload %d: %d, %d, %d\n",
4458                            i,
4459                            mgr->payloads[i].payload_state,
4460                            mgr->payloads[i].start_slot,
4461                            mgr->payloads[i].num_slots);
        }
4465         mutex_unlock(&mgr->payload_lock);
4466
4467         mutex_lock(&mgr->lock);
4468         if (mgr->mst_primary) {
4469                 u8 buf[DP_PAYLOAD_TABLE_SIZE];
4470                 int ret;
4471
                /* only dump each block if the DPCD read actually succeeded */
                ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf,
                                       DP_RECEIVER_CAP_SIZE);
                if (ret == DP_RECEIVER_CAP_SIZE)
                        seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);

                ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
                if (ret == 2)
                        seq_printf(m, "faux/mst: %*ph\n", 2, buf);

                ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
                if (ret == 1)
                        seq_printf(m, "mst ctrl: %*ph\n", 1, buf);

                /* dump the standard OUI branch header */
                ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf,
                                       DP_BRANCH_OUI_HEADER_SIZE);
                if (ret == DP_BRANCH_OUI_HEADER_SIZE) {
                        seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
                        for (i = 0x3; i < 0x8 && buf[i]; i++)
                                seq_printf(m, "%c", buf[i]);
                        seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
                                   buf[0x9] >> 4, buf[0x9] & 0xf,
                                   buf[0xa], buf[0xb]);
                }
4486                 if (dump_dp_payload_table(mgr, buf))
4487                         seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4488         }
4489
4490         mutex_unlock(&mgr->lock);
}
4493 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
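
/*
 * Example (hypothetical driver glue, names are illustrative): this is
 * normally wired up as a debugfs file, e.g.
 *
 *      static int example_mst_topology_show(struct seq_file *m, void *data)
 *      {
 *              struct example_device *edev = m->private;
 *
 *              drm_dp_mst_dump_topology(m, &edev->mst_mgr);
 *              return 0;
 *      }
 */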
4494
4495 static void drm_dp_tx_work(struct work_struct *work)
4496 {
        struct drm_dp_mst_topology_mgr *mgr =
                container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4498
4499         mutex_lock(&mgr->qlock);
4500         if (!list_empty(&mgr->tx_msg_downq))
4501                 process_single_down_tx_qlock(mgr);
4502         mutex_unlock(&mgr->qlock);
4503 }
4504
4505 static inline void
4506 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4507 {
4508         if (port->connector)
4509                 port->mgr->cbs->destroy_connector(port->mgr, port->connector);
4510
4511         drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE);
4512         drm_dp_mst_put_port_malloc(port);
4513 }
4514
4515 static inline void
4516 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4517 {
4518         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4519         struct drm_dp_mst_port *port, *tmp;
4520         bool wake_tx = false;
4521
4522         mutex_lock(&mgr->lock);
4523         list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
4524                 list_del(&port->next);
4525                 drm_dp_mst_topology_put_port(port);
4526         }
4527         mutex_unlock(&mgr->lock);
4528
        /* drop any tx slot messages */
4530         mutex_lock(&mstb->mgr->qlock);
4531         if (mstb->tx_slots[0]) {
4532                 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4533                 mstb->tx_slots[0] = NULL;
4534                 wake_tx = true;
4535         }
4536         if (mstb->tx_slots[1]) {
4537                 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4538                 mstb->tx_slots[1] = NULL;
4539                 wake_tx = true;
4540         }
4541         mutex_unlock(&mstb->mgr->qlock);
4542
4543         if (wake_tx)
4544                 wake_up_all(&mstb->mgr->tx_waitq);
4545
4546         drm_dp_mst_put_mstb_malloc(mstb);
4547 }
4548
4549 static void drm_dp_delayed_destroy_work(struct work_struct *work)
4550 {
4551         struct drm_dp_mst_topology_mgr *mgr =
4552                 container_of(work, struct drm_dp_mst_topology_mgr,
4553                              delayed_destroy_work);
4554         bool send_hotplug = false, go_again;
4555
4556         /*
4557          * Not a regular list traverse as we have to drop the destroy
4558          * connector lock before destroying the mstb/port, to avoid AB->BA
4559          * ordering between this lock and the config mutex.
4560          */
4561         do {
4562                 go_again = false;
4563
4564                 for (;;) {
4565                         struct drm_dp_mst_branch *mstb;
4566
4567                         mutex_lock(&mgr->delayed_destroy_lock);
4568                         mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4569                                                         struct drm_dp_mst_branch,
4570                                                         destroy_next);
4571                         if (mstb)
4572                                 list_del(&mstb->destroy_next);
4573                         mutex_unlock(&mgr->delayed_destroy_lock);
4574
4575                         if (!mstb)
4576                                 break;
4577
4578                         drm_dp_delayed_destroy_mstb(mstb);
4579                         go_again = true;
4580                 }
4581
4582                 for (;;) {
4583                         struct drm_dp_mst_port *port;
4584
4585                         mutex_lock(&mgr->delayed_destroy_lock);
4586                         port = list_first_entry_or_null(&mgr->destroy_port_list,
4587                                                         struct drm_dp_mst_port,
4588                                                         next);
4589                         if (port)
4590                                 list_del(&port->next);
4591                         mutex_unlock(&mgr->delayed_destroy_lock);
4592
4593                         if (!port)
4594                                 break;
4595
4596                         drm_dp_delayed_destroy_port(port);
4597                         send_hotplug = true;
4598                         go_again = true;
4599                 }
4600         } while (go_again);
4601
4602         if (send_hotplug)
4603                 drm_kms_helper_hotplug_event(mgr->dev);
4604 }
4605
4606 static struct drm_private_state *
4607 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4608 {
4609         struct drm_dp_mst_topology_state *state, *old_state =
4610                 to_dp_mst_topology_state(obj->state);
4611         struct drm_dp_vcpi_allocation *pos, *vcpi;
4612
4613         state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4614         if (!state)
4615                 return NULL;
4616
4617         __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4618
4619         INIT_LIST_HEAD(&state->vcpis);
4620
4621         list_for_each_entry(pos, &old_state->vcpis, next) {
4622                 /* Prune leftover freed VCPI allocations */
4623                 if (!pos->vcpi)
4624                         continue;
4625
4626                 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4627                 if (!vcpi)
4628                         goto fail;
4629
4630                 drm_dp_mst_get_port_malloc(vcpi->port);
4631                 list_add(&vcpi->next, &state->vcpis);
4632         }
4633
4634         return &state->base;
4635
4636 fail:
4637         list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4638                 drm_dp_mst_put_port_malloc(pos->port);
4639                 kfree(pos);
4640         }
4641         kfree(state);
4642
4643         return NULL;
4644 }
4645
4646 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4647                                      struct drm_private_state *state)
4648 {
4649         struct drm_dp_mst_topology_state *mst_state =
4650                 to_dp_mst_topology_state(state);
4651         struct drm_dp_vcpi_allocation *pos, *tmp;
4652
4653         list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4654                 /* We only keep references to ports with non-zero VCPIs */
4655                 if (pos->vcpi)
4656                         drm_dp_mst_put_port_malloc(pos->port);
4657                 kfree(pos);
4658         }
4659
4660         kfree(mst_state);
4661 }
4662
4663 static inline int
4664 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
4665                                        struct drm_dp_mst_topology_state *mst_state)
4666 {
4667         struct drm_dp_vcpi_allocation *vcpi;
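        /* Of the 64 MTP timeslots, slot 0 always carries the MTP header */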
4668         int avail_slots = 63, payload_count = 0;
4669
4670         list_for_each_entry(vcpi, &mst_state->vcpis, next) {
                /* Releasing VCPI is always OK, even if the port is gone */
4672                 if (!vcpi->vcpi) {
4673                         DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4674                                          vcpi->port);
4675                         continue;
4676                 }
4677
4678                 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4679                                  vcpi->port, vcpi->vcpi);
4680
4681                 avail_slots -= vcpi->vcpi;
4682                 if (avail_slots < 0) {
4683                         DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4684                                          vcpi->port, mst_state,
4685                                          avail_slots + vcpi->vcpi);
4686                         return -ENOSPC;
4687                 }
4688
4689                 if (++payload_count > mgr->max_payloads) {
4690                         DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4691                                          mgr, mst_state, mgr->max_payloads);
4692                         return -EINVAL;
4693                 }
4694         }
4695         DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4696                          mgr, mst_state, avail_slots,
4697                          63 - avail_slots);
4698
4699         return 0;
4700 }
4701
4702 /**
4703  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
4704  * atomic update is valid
4705  * @state: Pointer to the new &struct drm_dp_mst_topology_state
4706  *
4707  * Checks the given topology state for an atomic update to ensure that it's
4708  * valid. This includes checking whether there's enough bandwidth to support
4709  * the new VCPI allocations in the atomic update.
4710  *
4711  * Any atomic drivers supporting DP MST must make sure to call this after
4712  * checking the rest of their state in their
4713  * &drm_mode_config_funcs.atomic_check() callback.
4714  *
4715  * See also:
4716  * drm_dp_atomic_find_vcpi_slots()
4717  * drm_dp_atomic_release_vcpi_slots()
4718  *
4719  * Returns:
4720  *
4721  * 0 if the new state is valid, negative error code otherwise.
4722  */
4723 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
4724 {
4725         struct drm_dp_mst_topology_mgr *mgr;
4726         struct drm_dp_mst_topology_state *mst_state;
4727         int i, ret = 0;
4728
4729         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4730                 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
4731                 if (ret)
4732                         break;
4733         }
4734
4735         return ret;
4736 }
4737 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
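
/*
 * Example (hypothetical, illustrative only): a driver's
 * &drm_mode_config_funcs.atomic_check implementation runs this after the
 * core checks:
 *
 *      static int example_atomic_check(struct drm_device *dev,
 *                                      struct drm_atomic_state *state)
 *      {
 *              int ret;
 *
 *              ret = drm_atomic_helper_check(dev, state);
 *              if (ret)
 *                      return ret;
 *
 *              return drm_dp_mst_atomic_check(state);
 *      }
 */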
4738
4739 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
4740         .atomic_duplicate_state = drm_dp_mst_duplicate_state,
4741         .atomic_destroy_state = drm_dp_mst_destroy_state,
4742 };
4743 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
4744
4745 /**
 * drm_atomic_get_mst_topology_state() - get MST topology state
 *
 * @state: global atomic state
 * @mgr: MST topology manager, also the private object in this case
 *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST
 * atomic state vtable so that the private object state returned is that of a
 * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
 * caller to take care of the locking, so we warn if the connection_mutex is
 * not held.
4755  *
4756  * RETURNS:
4757  *
4758  * The MST topology state or error pointer.
4759  */
struct drm_dp_mst_topology_state *
drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
                                  struct drm_dp_mst_topology_mgr *mgr)
4762 {
        return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state,
                                                                         &mgr->base));
4764 }
4765 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
4766
4767 /**
4768  * drm_dp_mst_topology_mgr_init - initialise a topology manager
4769  * @mgr: manager struct to initialise
4770  * @dev: device providing this structure - for i2c addition.
4771  * @aux: DP helper aux channel to talk to this device
4772  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
4773  * @max_payloads: maximum number of payloads this GPU can source
4774  * @conn_base_id: the connector object ID the MST device is connected to.
4775  *
 * Returns: 0 on success, or a negative error code on failure.
4777  */
4778 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
4779                                  struct drm_device *dev, struct drm_dp_aux *aux,
4780                                  int max_dpcd_transaction_bytes,
4781                                  int max_payloads, int conn_base_id)
4782 {
4783         struct drm_dp_mst_topology_state *mst_state;
4784
4785         mutex_init(&mgr->lock);
4786         mutex_init(&mgr->qlock);
4787         mutex_init(&mgr->payload_lock);
4788         mutex_init(&mgr->delayed_destroy_lock);
4789         mutex_init(&mgr->up_req_lock);
4790         mutex_init(&mgr->probe_lock);
4791 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
4792         mutex_init(&mgr->topology_ref_history_lock);
4793 #endif
4794         INIT_LIST_HEAD(&mgr->tx_msg_downq);
4795         INIT_LIST_HEAD(&mgr->destroy_port_list);
4796         INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
4797         INIT_LIST_HEAD(&mgr->up_req_list);
4798         INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
4799         INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
4800         INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
4801         INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
4802         init_waitqueue_head(&mgr->tx_waitq);
4803         mgr->dev = dev;
4804         mgr->aux = aux;
4805         mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
4806         mgr->max_payloads = max_payloads;
4807         mgr->conn_base_id = conn_base_id;
4808         if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
4809             max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
4810                 return -EINVAL;
4811         mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
4812         if (!mgr->payloads)
4813                 return -ENOMEM;
4814         mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
4815         if (!mgr->proposed_vcpis)
4816                 return -ENOMEM;
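        /* payload/VCPI ID 0 is reserved, mark it used from the start */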
4817         set_bit(0, &mgr->payload_mask);
4818
4819         mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
4820         if (mst_state == NULL)
4821                 return -ENOMEM;
4822
4823         mst_state->mgr = mgr;
4824         INIT_LIST_HEAD(&mst_state->vcpis);
4825
4826         drm_atomic_private_obj_init(dev, &mgr->base,
4827                                     &mst_state->base,
4828                                     &drm_dp_mst_topology_state_funcs);
4829
4830         return 0;
4831 }
4832 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
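
/*
 * Example (hypothetical values and field names): a driver embeds the manager
 * in its own connector/port structure and initializes it once the AUX
 * channel is registered:
 *
 *      ret = drm_dp_mst_topology_mgr_init(&econn->mst_mgr, dev, &econn->aux,
 *                                         16, 4, connector->base.id);
 *
 * where 16 is the hw's DPCD transaction limit and 4 is the number of payload
 * streams the hw can source.
 */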
4833
4834 /**
4835  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
4836  * @mgr: manager to destroy
4837  */
4838 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4839 {
4840         drm_dp_mst_topology_mgr_set_mst(mgr, false);
4841         flush_work(&mgr->work);
4842         cancel_work_sync(&mgr->delayed_destroy_work);
4843         mutex_lock(&mgr->payload_lock);
4844         kfree(mgr->payloads);
4845         mgr->payloads = NULL;
4846         kfree(mgr->proposed_vcpis);
4847         mgr->proposed_vcpis = NULL;
4848         mutex_unlock(&mgr->payload_lock);
4849         mgr->dev = NULL;
4850         mgr->aux = NULL;
4851         drm_atomic_private_obj_fini(&mgr->base);
4852         mgr->funcs = NULL;
4853
4854         mutex_destroy(&mgr->delayed_destroy_lock);
4855         mutex_destroy(&mgr->payload_lock);
4856         mutex_destroy(&mgr->qlock);
4857         mutex_destroy(&mgr->lock);
4858         mutex_destroy(&mgr->up_req_lock);
4859         mutex_destroy(&mgr->probe_lock);
4860 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
4861         mutex_destroy(&mgr->topology_ref_history_lock);
4862 #endif
4863 }
4864 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
4865
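/*
 * A DP_REMOTE_I2C_READ message can only encode a limited transfer shape: up
 * to DP_REMOTE_I2C_READ_MAX_TRANSACTIONS short writes (each at most 0xff
 * bytes, without the read flag set), followed by exactly one read of at most
 * 0xff bytes.
 */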
4866 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
4867 {
4868         int i;
4869
4870         if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
4871                 return false;
4872
4873         for (i = 0; i < num - 1; i++) {
4874                 if (msgs[i].flags & I2C_M_RD ||
4875                     msgs[i].len > 0xff)
4876                         return false;
4877         }
4878
4879         return msgs[num - 1].flags & I2C_M_RD &&
4880                 msgs[num - 1].len <= 0xff;
4881 }
4882
4883 /* I2C device */
4884 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
4885                                int num)
4886 {
4887         struct drm_dp_aux *aux = adapter->algo_data;
4888         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
4889         struct drm_dp_mst_branch *mstb;
4890         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
4891         unsigned int i;
4892         struct drm_dp_sideband_msg_req_body msg;
4893         struct drm_dp_sideband_msg_tx *txmsg = NULL;
4894         int ret;
4895
4896         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
4897         if (!mstb)
4898                 return -EREMOTEIO;
4899
4900         if (!remote_i2c_read_ok(msgs, num)) {
4901                 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
4902                 ret = -EIO;
4903                 goto out;
4904         }
4905
4906         memset(&msg, 0, sizeof(msg));
4907         msg.req_type = DP_REMOTE_I2C_READ;
4908         msg.u.i2c_read.num_transactions = num - 1;
4909         msg.u.i2c_read.port_number = port->port_num;
4910         for (i = 0; i < num - 1; i++) {
4911                 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
4912                 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
4913                 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
4914                 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
4915         }
4916         msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
4917         msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
4918
4919         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
4920         if (!txmsg) {
4921                 ret = -ENOMEM;
4922                 goto out;
4923         }
4924
4925         txmsg->dst = mstb;
4926         drm_dp_encode_sideband_req(&msg, txmsg);
4927
4928         drm_dp_queue_down_tx(mgr, txmsg);
4929
4930         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4934                         ret = -EREMOTEIO;
4935                         goto out;
4936                 }
4937                 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
4938                         ret = -EIO;
4939                         goto out;
4940                 }
                memcpy(msgs[num - 1].buf,
                       txmsg->reply.u.remote_i2c_read_ack.bytes,
                       msgs[num - 1].len);
4942                 ret = num;
4943         }
4944 out:
4945         kfree(txmsg);
4946         drm_dp_mst_topology_put_mstb(mstb);
4947         return ret;
4948 }
4949
4950 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
4951 {
4952         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
4953                I2C_FUNC_SMBUS_READ_BLOCK_DATA |
4954                I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
4955                I2C_FUNC_10BIT_ADDR;
4956 }
4957
4958 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
4959         .functionality = drm_dp_mst_i2c_functionality,
4960         .master_xfer = drm_dp_mst_i2c_xfer,
4961 };
4962
4963 /**
4964  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
4965  * @aux: DisplayPort AUX channel
4966  *
4967  * Returns 0 on success or a negative error code on failure.
4968  */
4969 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
4970 {
4971         aux->ddc.algo = &drm_dp_mst_i2c_algo;
4972         aux->ddc.algo_data = aux;
4973         aux->ddc.retries = 3;
4974
4975         aux->ddc.class = I2C_CLASS_DDC;
4976         aux->ddc.owner = THIS_MODULE;
4977         aux->ddc.dev.parent = aux->dev;
4978         aux->ddc.dev.of_node = aux->dev->of_node;
4979
4980         strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4981                 sizeof(aux->ddc.name));
4982
4983         return i2c_add_adapter(&aux->ddc);
4984 }
4985
4986 /**
4987  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4988  * @aux: DisplayPort AUX channel
4989  */
4990 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4991 {
4992         i2c_del_adapter(&aux->ddc);
4993 }