drm/dp_mst: Refactor drm_dp_mst_handle_down_rep()
drivers/gpu/drm/drm_dp_mst_topology.c
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
                                  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
                                     int id,
                                     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
                                 struct drm_dp_mst_port *port,
                                 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port,
                                  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                                     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_mst_branch *mstb,
                                           struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
                                 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
        static const char * const req_type_str[] = {
                DP_STR(GET_MSG_TRANSACTION_VERSION),
                DP_STR(LINK_ADDRESS),
                DP_STR(CONNECTION_STATUS_NOTIFY),
                DP_STR(ENUM_PATH_RESOURCES),
                DP_STR(ALLOCATE_PAYLOAD),
                DP_STR(QUERY_PAYLOAD),
                DP_STR(RESOURCE_STATUS_NOTIFY),
                DP_STR(CLEAR_PAYLOAD_ID_TABLE),
                DP_STR(REMOTE_DPCD_READ),
                DP_STR(REMOTE_DPCD_WRITE),
                DP_STR(REMOTE_I2C_READ),
                DP_STR(REMOTE_I2C_WRITE),
                DP_STR(POWER_UP_PHY),
                DP_STR(POWER_DOWN_PHY),
                DP_STR(SINK_EVENT_NOTIFY),
                DP_STR(QUERY_STREAM_ENC_STATUS),
        };

        if (req_type >= ARRAY_SIZE(req_type_str) ||
            !req_type_str[req_type])
                return "unknown";

        return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
        static const char * const nak_reason_str[] = {
                DP_STR(WRITE_FAILURE),
                DP_STR(INVALID_READ),
                DP_STR(CRC_FAILURE),
                DP_STR(BAD_PARAM),
                DP_STR(DEFER),
                DP_STR(LINK_FAILURE),
                DP_STR(NO_RESOURCES),
                DP_STR(DPCD_FAIL),
                DP_STR(I2C_NAK),
                DP_STR(ALLOCATE_FAIL),
        };

        if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
            !nak_reason_str[nak_reason])
                return "unknown";

        return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
        static const char * const sideband_reason_str[] = {
                DP_STR(QUEUED),
                DP_STR(START_SEND),
                DP_STR(SENT),
                DP_STR(RX),
                DP_STR(TIMEOUT),
        };

        if (state >= ARRAY_SIZE(sideband_reason_str) ||
            !sideband_reason_str[state])
                return "unknown";

        return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
        int i;
        u8 unpacked_rad[16];

        for (i = 0; i < lct; i++) {
                if (i % 2)
                        unpacked_rad[i] = rad[i / 2] >> 4;
                else
                        /* low nibble; BIT_MASK(4) would only test bit 4 */
                        unpacked_rad[i] = rad[i / 2] & 0xf;
        }

        /* TODO: Eventually add something to printk so we can format the rad
         * like this: 1.2.3
         */
        return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
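
/*
 * Worked example (hypothetical values): each hop in the RAD is a 4-bit
 * port number, packed two hops per byte. With lct = 2 and rad[0] = 0x21,
 * the loop above yields unpacked_rad[] = { 0x1, 0x2 }: even indices take
 * the low nibble, odd indices the high nibble. The result is then printed
 * as a hex string via the %*phC printk extension.
 */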

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
        u8 bitmask = 0x80;
        u8 bitshift = 7;
        u8 array_index = 0;
        int number_of_bits = num_nibbles * 4;
        u8 remainder = 0;

        while (number_of_bits != 0) {
                number_of_bits--;
                remainder <<= 1;
                remainder |= (data[array_index] & bitmask) >> bitshift;
                bitmask >>= 1;
                bitshift--;
                if (bitmask == 0) {
                        bitmask = 0x80;
                        bitshift = 7;
                        array_index++;
                }
                if ((remainder & 0x10) == 0x10)
                        remainder ^= 0x13;
        }

        number_of_bits = 4;
        while (number_of_bits != 0) {
                number_of_bits--;
                remainder <<= 1;
                if ((remainder & 0x10) != 0)
                        remainder ^= 0x13;
        }

        return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
        u8 bitmask = 0x80;
        u8 bitshift = 7;
        u8 array_index = 0;
        int number_of_bits = number_of_bytes * 8;
        u16 remainder = 0;

        while (number_of_bits != 0) {
                number_of_bits--;
                remainder <<= 1;
                remainder |= (data[array_index] & bitmask) >> bitshift;
                bitmask >>= 1;
                bitshift--;
                if (bitmask == 0) {
                        bitmask = 0x80;
                        bitshift = 7;
                        array_index++;
                }
                if ((remainder & 0x100) == 0x100)
                        remainder ^= 0xd5;
        }

        number_of_bits = 8;
        while (number_of_bits != 0) {
                number_of_bits--;
                remainder <<= 1;
                if ((remainder & 0x100) != 0)
                        remainder ^= 0xd5;
        }

        return remainder & 0xff;
}
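
/*
 * Both helpers above are plain shift-and-XOR long division over the
 * message bits: drm_dp_msg_header_crc4() effectively reduces by the
 * generator 0x13 (x^4 + x + 1) and keeps a 4-bit remainder, while
 * drm_dp_msg_data_crc4() effectively reduces by 0x1d5
 * (x^8 + x^7 + x^6 + x^4 + x^2 + 1) and keeps an 8-bit remainder.
 * A sketch of how the callers below use them (argument values are
 * illustrative only):
 *
 *	u8 hdr_crc  = drm_dp_msg_header_crc4(buf, (hdrlen * 2) - 1);
 *	u8 body_crc = drm_dp_msg_data_crc4(msg, len);
 */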
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
        u8 size = 3;

        size += (hdr->lct / 2);
        return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
                                           u8 *buf, int *len)
{
        int idx = 0;
        int i;
        u8 crc4;

        buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
        for (i = 0; i < (hdr->lct / 2); i++)
                buf[idx++] = hdr->rad[i];
        buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
                (hdr->msg_len & 0x3f);
        buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

        crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
        buf[idx - 1] |= (crc4 & 0xf);

        *len = idx;
}
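
/*
 * As encoded above, the sideband message header is laid out as:
 *
 *	byte 0:          LCT[7:4] | LCR[3:0]
 *	bytes 1..lct/2:  RAD, two 4-bit hops per byte
 *	next byte:       Broadcast[7] | Path_Msg[6] | Msg_Body_Len[5:0]
 *	last byte:       SOMT[7] | EOMT[6] | Seqno[4] | header CRC4[3:0]
 *
 * The CRC covers every preceding header nibble, which is why it is
 * computed over (idx * 2) - 1 nibbles and OR'd into the low nibble of
 * the final byte.
 */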

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
                                           u8 *buf, int buflen, u8 *hdrlen)
{
        u8 crc4;
        u8 len;
        int i;
        u8 idx;

        if (buf[0] == 0)
                return false;
        len = 3;
        len += ((buf[0] & 0xf0) >> 4) / 2;
        if (len > buflen)
                return false;
        crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

        if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
                DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
                return false;
        }

        hdr->lct = (buf[0] & 0xf0) >> 4;
        hdr->lcr = (buf[0] & 0xf);
        idx = 1;
        for (i = 0; i < (hdr->lct / 2); i++)
                hdr->rad[i] = buf[idx++];
        hdr->broadcast = (buf[idx] >> 7) & 0x1;
        hdr->path_msg = (buf[idx] >> 6) & 0x1;
        hdr->msg_len = buf[idx] & 0x3f;
        idx++;
        hdr->somt = (buf[idx] >> 7) & 0x1;
        hdr->eomt = (buf[idx] >> 6) & 0x1;
        hdr->seqno = (buf[idx] >> 4) & 0x1;
        idx++;
        *hdrlen = idx;
        return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
                           struct drm_dp_sideband_msg_tx *raw)
{
        int idx = 0;
        int i;
        u8 *buf = raw->msg;

        buf[idx++] = req->req_type & 0x7f;

        switch (req->req_type) {
        case DP_ENUM_PATH_RESOURCES:
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
                idx++;
                break;
        case DP_ALLOCATE_PAYLOAD:
                buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
                        (req->u.allocate_payload.number_sdp_streams & 0xf);
                idx++;
                buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
                idx++;
                buf[idx] = (req->u.allocate_payload.pbn >> 8);
                idx++;
                buf[idx] = (req->u.allocate_payload.pbn & 0xff);
                idx++;
                for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
                        buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
                                (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
                        idx++;
                }
                if (req->u.allocate_payload.number_sdp_streams & 1) {
                        i = req->u.allocate_payload.number_sdp_streams - 1;
                        buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
                        idx++;
                }
                break;
        case DP_QUERY_PAYLOAD:
                buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
                idx++;
                buf[idx] = (req->u.query_payload.vcpi & 0x7f);
                idx++;
                break;
        case DP_REMOTE_DPCD_READ:
                buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
                buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
                idx++;
                buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
                idx++;
                buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
                idx++;
                buf[idx] = (req->u.dpcd_read.num_bytes);
                idx++;
                break;

        case DP_REMOTE_DPCD_WRITE:
                buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
                buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
                idx++;
                buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
                idx++;
                buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
                idx++;
                buf[idx] = (req->u.dpcd_write.num_bytes);
                idx++;
                memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
                idx += req->u.dpcd_write.num_bytes;
                break;
        case DP_REMOTE_I2C_READ:
                buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
                buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
                idx++;
                for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
                        buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
                        idx++;
                        buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
                        idx++;
                        memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
                        idx += req->u.i2c_read.transactions[i].num_bytes;

                        buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
                        buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
                        idx++;
                }
                buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
                idx++;
                buf[idx] = (req->u.i2c_read.num_bytes_read);
                idx++;
                break;

        case DP_REMOTE_I2C_WRITE:
                buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
                idx++;
                buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
                idx++;
                buf[idx] = (req->u.i2c_write.num_bytes);
                idx++;
                memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
                idx += req->u.i2c_write.num_bytes;
                break;
        }
        raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
                           struct drm_dp_sideband_msg_req_body *req)
{
        const u8 *buf = raw->msg;
        int i, idx = 0;

        req->req_type = buf[idx++] & 0x7f;
        switch (req->req_type) {
        case DP_ENUM_PATH_RESOURCES:
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
                break;
        case DP_ALLOCATE_PAYLOAD:
                {
                        struct drm_dp_allocate_payload *a =
                                &req->u.allocate_payload;

                        a->number_sdp_streams = buf[idx] & 0xf;
                        a->port_number = (buf[idx] >> 4) & 0xf;

                        WARN_ON(buf[++idx] & 0x80);
                        a->vcpi = buf[idx] & 0x7f;

                        a->pbn = buf[++idx] << 8;
                        a->pbn |= buf[++idx];

                        idx++;
                        for (i = 0; i < a->number_sdp_streams; i++) {
                                a->sdp_stream_sink[i] =
                                        (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
                        }
                }
                break;
        case DP_QUERY_PAYLOAD:
                req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
                WARN_ON(buf[++idx] & 0x80);
                req->u.query_payload.vcpi = buf[idx] & 0x7f;
                break;
        case DP_REMOTE_DPCD_READ:
                {
                        struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

                        r->port_number = (buf[idx] >> 4) & 0xf;

                        r->dpcd_address = (buf[idx] << 16) & 0xf0000;
                        r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
                        r->dpcd_address |= buf[++idx] & 0xff;

                        r->num_bytes = buf[++idx];
                }
                break;
        case DP_REMOTE_DPCD_WRITE:
                {
                        struct drm_dp_remote_dpcd_write *w =
                                &req->u.dpcd_write;

                        w->port_number = (buf[idx] >> 4) & 0xf;

                        w->dpcd_address = (buf[idx] << 16) & 0xf0000;
                        w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
                        w->dpcd_address |= buf[++idx] & 0xff;

                        w->num_bytes = buf[++idx];

                        w->bytes = kmemdup(&buf[++idx], w->num_bytes,
                                           GFP_KERNEL);
                        if (!w->bytes)
                                return -ENOMEM;
                }
                break;
        case DP_REMOTE_I2C_READ:
                {
                        struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
                        struct drm_dp_remote_i2c_read_tx *tx;
                        bool failed = false;

                        r->num_transactions = buf[idx] & 0x3;
                        r->port_number = (buf[idx] >> 4) & 0xf;
                        for (i = 0; i < r->num_transactions; i++) {
                                tx = &r->transactions[i];

                                tx->i2c_dev_id = buf[++idx] & 0x7f;
                                tx->num_bytes = buf[++idx];
                                tx->bytes = kmemdup(&buf[++idx],
                                                    tx->num_bytes,
                                                    GFP_KERNEL);
                                if (!tx->bytes) {
                                        failed = true;
                                        break;
                                }
                                idx += tx->num_bytes;
                                tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
                                tx->i2c_transaction_delay = buf[idx] & 0xf;
                        }

                        if (failed) {
                                /* free only the transactions parsed so far;
                                 * the old loop freed the failed (NULL) tx
                                 * repeatedly and leaked the earlier buffers
                                 */
                                for (; i >= 0; i--)
                                        kfree(r->transactions[i].bytes);
                                return -ENOMEM;
                        }

                        r->read_i2c_device_id = buf[++idx] & 0x7f;
                        r->num_bytes_read = buf[++idx];
                }
                break;
        case DP_REMOTE_I2C_WRITE:
                {
                        struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

                        w->port_number = (buf[idx] >> 4) & 0xf;
                        w->write_i2c_device_id = buf[++idx] & 0x7f;
                        w->num_bytes = buf[++idx];
                        w->bytes = kmemdup(&buf[++idx], w->num_bytes,
                                           GFP_KERNEL);
                        if (!w->bytes)
                                return -ENOMEM;
                }
                break;
        }

        return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
                                  int indent, struct drm_printer *printer)
{
        int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
        if (req->req_type == DP_LINK_ADDRESS) {
                /* No contents to print */
                P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
                return;
        }

        P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
        indent++;

        switch (req->req_type) {
        case DP_ENUM_PATH_RESOURCES:
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                P("port=%d\n", req->u.port_num.port_number);
                break;
        case DP_ALLOCATE_PAYLOAD:
                P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
                  req->u.allocate_payload.port_number,
                  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
                  req->u.allocate_payload.number_sdp_streams,
                  req->u.allocate_payload.number_sdp_streams,
                  req->u.allocate_payload.sdp_stream_sink);
                break;
        case DP_QUERY_PAYLOAD:
                P("port=%d vcpi=%d\n",
                  req->u.query_payload.port_number,
                  req->u.query_payload.vcpi);
                break;
        case DP_REMOTE_DPCD_READ:
                P("port=%d dpcd_addr=%05x len=%d\n",
                  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
                  req->u.dpcd_read.num_bytes);
                break;
        case DP_REMOTE_DPCD_WRITE:
                P("port=%d addr=%05x len=%d: %*ph\n",
                  req->u.dpcd_write.port_number,
                  req->u.dpcd_write.dpcd_address,
                  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
                  req->u.dpcd_write.bytes);
                break;
        case DP_REMOTE_I2C_READ:
                P("port=%d num_tx=%d id=%d size=%d:\n",
                  req->u.i2c_read.port_number,
                  req->u.i2c_read.num_transactions,
                  req->u.i2c_read.read_i2c_device_id,
                  req->u.i2c_read.num_bytes_read);

                indent++;
                for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
                        const struct drm_dp_remote_i2c_read_tx *rtx =
                                &req->u.i2c_read.transactions[i];

                        P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
                          i, rtx->i2c_dev_id, rtx->num_bytes,
                          rtx->no_stop_bit, rtx->i2c_transaction_delay,
                          rtx->num_bytes, rtx->bytes);
                }
                break;
        case DP_REMOTE_I2C_WRITE:
                P("port=%d id=%d size=%d: %*ph\n",
                  req->u.i2c_write.port_number,
                  req->u.i2c_write.write_i2c_device_id,
                  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
                  req->u.i2c_write.bytes);
                break;
        default:
                P("???\n");
                break;
        }
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
                                const struct drm_dp_sideband_msg_tx *txmsg)
{
        struct drm_dp_sideband_msg_req_body req;
        char buf[64];
        int ret;
        int i;

        drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
                              sizeof(buf));
        drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
                   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
                   drm_dp_mst_sideband_tx_state_str(txmsg->state),
                   txmsg->path_msg, buf);

        ret = drm_dp_decode_sideband_req(txmsg, &req);
        if (ret) {
                drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
                return;
        }
        drm_dp_dump_sideband_msg_req_body(&req, 1, p);

        switch (req.req_type) {
        case DP_REMOTE_DPCD_WRITE:
                kfree(req.u.dpcd_write.bytes);
                break;
        case DP_REMOTE_I2C_READ:
                for (i = 0; i < req.u.i2c_read.num_transactions; i++)
                        kfree(req.u.i2c_read.transactions[i].bytes);
                break;
        case DP_REMOTE_I2C_WRITE:
                kfree(req.u.i2c_write.bytes);
                break;
        }
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
        u8 crc4;

        crc4 = drm_dp_msg_data_crc4(msg, len);
        msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
                                         struct drm_dp_sideband_msg_tx *raw)
{
        int idx = 0;
        u8 *buf = raw->msg;

        buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

        raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
                                      u8 *replybuf, u8 replybuflen, bool hdr)
{
        int ret;
        u8 crc4;

        if (hdr) {
                u8 hdrlen;
                struct drm_dp_sideband_msg_hdr recv_hdr;

                ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
                if (ret == false) {
                        print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
                        return false;
                }

                /*
                 * ignore out-of-order messages or messages that are part of a
                 * failed transaction
                 */
                if (!recv_hdr.somt && !msg->have_somt)
                        return false;

                /* get length contained in this portion */
                msg->curchunk_len = recv_hdr.msg_len;
                msg->curchunk_hdrlen = hdrlen;

                /* we have already gotten an somt - don't bother parsing */
                if (recv_hdr.somt && msg->have_somt)
                        return false;

                if (recv_hdr.somt) {
                        memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
                        msg->have_somt = true;
                }
                if (recv_hdr.eomt)
                        msg->have_eomt = true;

                /* copy the bytes for the remainder of this header chunk */
                msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
                memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
        } else {
                memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
                msg->curchunk_idx += replybuflen;
        }

        if (msg->curchunk_idx >= msg->curchunk_len) {
                /* do CRC */
                crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
                /* copy chunk into bigger msg */
                memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
                msg->curlen += msg->curchunk_len - 1;
        }
        return true;
}
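
/*
 * A sideband reply can span several chunks: the first chunk's header has
 * SOMT set, the last one EOMT. Each chunk ends in a data CRC byte, which
 * is why only curchunk_len - 1 bytes are copied into the assembled
 * message above. Note that the chunk CRC is computed here but not
 * currently compared against the received CRC byte.
 */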

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
                                               struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;
        int i;

        memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
        idx += 16;
        repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        for (i = 0; i < repmsg->u.link_addr.nports; i++) {
                if (raw->msg[idx] & 0x80)
                        repmsg->u.link_addr.ports[i].input_port = 1;

                repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
                repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

                idx++;
                if (idx > raw->curlen)
                        goto fail_len;
                repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
                repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
                if (repmsg->u.link_addr.ports[i].input_port == 0)
                        repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
                idx++;
                if (idx > raw->curlen)
                        goto fail_len;
                if (repmsg->u.link_addr.ports[i].input_port == 0) {
                        repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
                        idx++;
                        if (idx > raw->curlen)
                                goto fail_len;
                        memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
                        idx += 16;
                        if (idx > raw->curlen)
                                goto fail_len;
                        repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
                        repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
                        idx++;
                }
                if (idx > raw->curlen)
                        goto fail_len;
        }

        return true;
fail_len:
        DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
                                                   struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
        idx++;
        if (idx > raw->curlen)
                goto fail_len;

        memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
        return true;
fail_len:
        DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
                                                      struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
                                                      struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
        idx++;
        /* TODO check */
        memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
        return true;
fail_len:
        DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
                                                          struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
                                                          struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.allocate_payload.vcpi = raw->msg[idx];
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
                                                    struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;
        repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
        idx += 2;
        if (idx > raw->curlen)
                goto fail_len;
        return true;
fail_len:
        DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
                                                       struct drm_dp_sideband_msg_reply_body *repmsg)
{
        int idx = 1;

        repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
        idx++;
        if (idx > raw->curlen) {
                DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
                              idx, raw->curlen);
                return false;
        }
        return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                                        struct drm_dp_sideband_msg_reply_body *msg)
{
        memset(msg, 0, sizeof(*msg));
        msg->reply_type = (raw->msg[0] & 0x80) >> 7;
        msg->req_type = (raw->msg[0] & 0x7f);

        if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
                memcpy(msg->u.nak.guid, &raw->msg[1], 16);
                msg->u.nak.reason = raw->msg[17];
                msg->u.nak.nak_data = raw->msg[18];
                return false;
        }

        switch (msg->req_type) {
        case DP_LINK_ADDRESS:
                return drm_dp_sideband_parse_link_address(raw, msg);
        case DP_QUERY_PAYLOAD:
                return drm_dp_sideband_parse_query_payload_ack(raw, msg);
        case DP_REMOTE_DPCD_READ:
                return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
        case DP_REMOTE_DPCD_WRITE:
                return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
        case DP_REMOTE_I2C_READ:
                return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
        case DP_ENUM_PATH_RESOURCES:
                return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
        case DP_ALLOCATE_PAYLOAD:
                return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
        case DP_POWER_DOWN_PHY:
        case DP_POWER_UP_PHY:
                return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
        default:
                DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
                return false;
        }
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
                                                           struct drm_dp_sideband_msg_req_body *msg)
{
        int idx = 1;

        msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;

        memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
        idx += 16;
        if (idx > raw->curlen)
                goto fail_len;

        msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
        msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
        msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
        msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
        msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
        idx++;
        return true;
fail_len:
        DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
                                                           struct drm_dp_sideband_msg_req_body *msg)
{
        int idx = 1;

        msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
        idx++;
        if (idx > raw->curlen)
                goto fail_len;

        memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
        idx += 16;
        if (idx > raw->curlen)
                goto fail_len;

        msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
        idx++;
        return true;
fail_len:
        DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
        return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
                                      struct drm_dp_sideband_msg_req_body *msg)
{
        memset(msg, 0, sizeof(*msg));
        msg->req_type = (raw->msg[0] & 0x7f);

        switch (msg->req_type) {
        case DP_CONNECTION_STATUS_NOTIFY:
                return drm_dp_sideband_parse_connection_status_notify(raw, msg);
        case DP_RESOURCE_STATUS_NOTIFY:
                return drm_dp_sideband_parse_resource_status_notify(raw, msg);
        default:
                DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
                          drm_dp_mst_req_type_str(msg->req_type));
                return false;
        }
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
        struct drm_dp_sideband_msg_req_body req;

        req.req_type = DP_REMOTE_DPCD_WRITE;
        req.u.dpcd_write.port_number = port_num;
        req.u.dpcd_write.dpcd_address = offset;
        req.u.dpcd_write.num_bytes = num_bytes;
        req.u.dpcd_write.bytes = bytes;
        drm_dp_encode_sideband_req(&req, msg);

        return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
        struct drm_dp_sideband_msg_req_body req;

        req.req_type = DP_LINK_ADDRESS;
        drm_dp_encode_sideband_req(&req, msg);
        return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
        struct drm_dp_sideband_msg_req_body req;

        req.req_type = DP_ENUM_PATH_RESOURCES;
        req.u.port_num.port_number = port_num;
        drm_dp_encode_sideband_req(&req, msg);
        msg->path_msg = true;
        return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
                                  u8 vcpi, uint16_t pbn,
                                  u8 number_sdp_streams,
                                  u8 *sdp_stream_sink)
{
        struct drm_dp_sideband_msg_req_body req;

        memset(&req, 0, sizeof(req));
        req.req_type = DP_ALLOCATE_PAYLOAD;
        req.u.allocate_payload.port_number = port_num;
        req.u.allocate_payload.vcpi = vcpi;
        req.u.allocate_payload.pbn = pbn;
        req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
        memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
               number_sdp_streams);
        drm_dp_encode_sideband_req(&req, msg);
        msg->path_msg = true;
        return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
                                  int port_num, bool power_up)
{
        struct drm_dp_sideband_msg_req_body req;

        if (power_up)
                req.req_type = DP_POWER_UP_PHY;
        else
                req.req_type = DP_POWER_DOWN_PHY;

        req.u.port_num.port_number = port_num;
        drm_dp_encode_sideband_req(&req, msg);
        msg->path_msg = true;
        return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_dp_vcpi *vcpi)
{
        int ret, vcpi_ret;

        mutex_lock(&mgr->payload_lock);
        ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
        if (ret > mgr->max_payloads) {
                ret = -EINVAL;
                DRM_DEBUG_KMS("out of payload ids %d\n", ret);
                goto out_unlock;
        }

        vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
        if (vcpi_ret > mgr->max_payloads) {
                ret = -EINVAL;
                DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
                goto out_unlock;
        }

        set_bit(ret, &mgr->payload_mask);
        set_bit(vcpi_ret, &mgr->vcpi_mask);
        vcpi->vcpi = vcpi_ret + 1;
        mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
        mutex_unlock(&mgr->payload_lock);
        return ret;
}
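
/*
 * Sketch of the bookkeeping above (hypothetical state): with a fresh
 * payload_mask of 0b0001, find_first_zero_bit() returns 1, which becomes
 * the DPCD payload ID, while the first free bit of vcpi_mask (0) yields
 * vcpi->vcpi = 1; the proposed VCPI is then stashed at
 * proposed_vcpis[ret - 1], i.e. index 0. That bit 0 of payload_mask is
 * pre-set at manager init is an assumption implied by the ret - 1
 * indexing here.
 */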

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
                                      int vcpi)
{
        int i;

        if (vcpi == 0)
                return;

        mutex_lock(&mgr->payload_lock);
        DRM_DEBUG_KMS("putting payload %d\n", vcpi);
        clear_bit(vcpi - 1, &mgr->vcpi_mask);

        for (i = 0; i < mgr->max_payloads; i++) {
                if (mgr->proposed_vcpis[i] &&
                    mgr->proposed_vcpis[i]->vcpi == vcpi) {
                        mgr->proposed_vcpis[i] = NULL;
                        clear_bit(i + 1, &mgr->payload_mask);
                }
        }
        mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_sideband_msg_tx *txmsg)
{
        unsigned int state;

        /*
         * All updates to txmsg->state are protected by mgr->qlock, and the two
         * cases we check here are terminal states. For those the barriers
         * provided by the wake_up/wait_event pair are enough.
         */
        state = READ_ONCE(txmsg->state);
        return (state == DRM_DP_SIDEBAND_TX_RX ||
                state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
                                    struct drm_dp_sideband_msg_tx *txmsg)
{
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        int ret;

        ret = wait_event_timeout(mgr->tx_waitq,
                                 check_txmsg_state(mgr, txmsg),
                                 (4 * HZ));
        mutex_lock(&mstb->mgr->qlock);
        if (ret > 0) {
                if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
                        ret = -EIO;
                        goto out;
                }
        } else {
                DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

                /* dump some state */
                ret = -EIO;

                /* remove from q */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
                    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
                        list_del(&txmsg->next);
                }

                if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
                    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
                        mstb->tx_slots[txmsg->seqno] = NULL;
                }
        }
out:
        if (unlikely(ret == -EIO && drm_debug & DRM_UT_DP)) {
                struct drm_printer p = drm_debug_printer(DBG_PREFIX);

                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
        }
        mutex_unlock(&mgr->qlock);

        return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
        struct drm_dp_mst_branch *mstb;

        mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
        if (!mstb)
                return NULL;

        mstb->lct = lct;
        if (lct > 1)
                memcpy(mstb->rad, rad, lct / 2);
        INIT_LIST_HEAD(&mstb->ports);
        kref_init(&mstb->topology_kref);
        kref_init(&mstb->malloc_kref);
        return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
        struct drm_dp_mst_branch *mstb =
                container_of(kref, struct drm_dp_mst_branch, malloc_kref);

        if (mstb->port_parent)
                drm_dp_mst_put_port_malloc(mstb->port_parent);

        kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
        kref_get(&mstb->malloc_kref);
        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
        kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
        struct drm_dp_mst_port *port =
                container_of(kref, struct drm_dp_mst_port, malloc_kref);

        drm_dp_mst_put_mstb_malloc(port->parent);
        kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1364  * function, drivers that wish to make use of &struct drm_dp_mst_port should
1365  * ensure that they grab at least one main malloc reference to their MST ports
1366  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1367  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1368  *
1369  * See also: drm_dp_mst_put_port_malloc()
1370  */
1371 void
1372 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1373 {
1374         kref_get(&port->malloc_kref);
1375         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1376 }
1377 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
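
/*
 * Hedged usage sketch (editorial addition): per the comment above, a
 * driver's &drm_dp_mst_topology_cbs.add_connector hook would typically
 * pin the port before returning. example_connector_create() is a
 * hypothetical driver helper, not a DRM API:
 *
 *	static struct drm_connector *
 *	example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = example_connector_create(mgr, port, path);
 *		if (!connector)
 *			return NULL;
 *
 *		// Hold a malloc reference for as long as the driver's
 *		// connector points at this port.
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 */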
1378
1379 /**
1380  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1381  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1382  *
1383  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1384  * reaches 0, the memory allocation for @port will be released and @port may
1385  * no longer be used.
1386  *
1387  * See also: drm_dp_mst_get_port_malloc()
1388  */
1389 void
1390 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1391 {
1392         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1393         kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1394 }
1395 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1396
1397 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1398 {
1399         struct drm_dp_mst_branch *mstb =
1400                 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1401         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1402         struct drm_dp_mst_port *port, *tmp;
1403         bool wake_tx = false;
1404
1405         mutex_lock(&mgr->lock);
1406         list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
1407                 list_del(&port->next);
1408                 drm_dp_mst_topology_put_port(port);
1409         }
1410         mutex_unlock(&mgr->lock);
1411
1412         /* time out any sideband msgs still occupying the tx slots */
1413         mutex_lock(&mstb->mgr->qlock);
1414         if (mstb->tx_slots[0]) {
1415                 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1416                 mstb->tx_slots[0] = NULL;
1417                 wake_tx = true;
1418         }
1419         if (mstb->tx_slots[1]) {
1420                 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1421                 mstb->tx_slots[1] = NULL;
1422                 wake_tx = true;
1423         }
1424         mutex_unlock(&mstb->mgr->qlock);
1425
1426         if (wake_tx)
1427                 wake_up_all(&mstb->mgr->tx_waitq);
1428
1429         drm_dp_mst_put_mstb_malloc(mstb);
1430 }
1431
1432 /**
1433  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1434  * branch device unless it's zero
1435  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1436  *
1437  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1438  * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
1439  * reached 0). Holding a topology reference implies that a malloc reference
1440  * will be held to @mstb as long as the user holds the topology reference.
1441  *
1442  * Care should be taken to ensure that the user has at least one malloc
1443  * reference to @mstb. If you already have a topology reference to @mstb, you
1444  * should use drm_dp_mst_topology_get_mstb() instead.
1445  *
1446  * See also:
1447  * drm_dp_mst_topology_get_mstb()
1448  * drm_dp_mst_topology_put_mstb()
1449  *
1450  * Returns:
1451  * * 1: A topology reference was grabbed successfully
1452  * * 0: @mstb is no longer in the topology, no reference was grabbed
1453  */
1454 static int __must_check
1455 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1456 {
1457         int ret = kref_get_unless_zero(&mstb->topology_kref);
1458
1459         if (ret)
1460                 DRM_DEBUG("mstb %p (%d)\n", mstb,
1461                           kref_read(&mstb->topology_kref));
1462
1463         return ret;
1464 }
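
/*
 * Illustrative contrast (editorial addition): the try_get variant above
 * is what this file reaches for when racing against hotplug, e.g. the
 * pattern used by the link probe work later in this file:
 *
 *	mutex_lock(&mgr->lock);
 *	mstb = mgr->mst_primary;
 *	if (mstb && !drm_dp_mst_topology_try_get_mstb(mstb))
 *		mstb = NULL;	// already on its way out, don't touch it
 *	mutex_unlock(&mgr->lock);
 *
 * whereas drm_dp_mst_topology_get_mstb() below is only legal once such a
 * reference is already held.
 */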
1465
1466 /**
1467  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1468  * branch device
1469  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1470  *
1471  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1472  * not it's already reached 0. This is only valid to use in scenarios where
1473  * you are already guaranteed to have at least one active topology reference
1474  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1475  *
1476  * See also:
1477  * drm_dp_mst_topology_try_get_mstb()
1478  * drm_dp_mst_topology_put_mstb()
1479  */
1480 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1481 {
1482         WARN_ON(kref_read(&mstb->topology_kref) == 0);
1483         kref_get(&mstb->topology_kref);
1484         DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1485 }
1486
1487 /**
1488  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1489  * device
1490  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1491  *
1492  * Releases a topology reference from @mstb by decrementing
1493  * &drm_dp_mst_branch.topology_kref.
1494  *
1495  * See also:
1496  * drm_dp_mst_topology_try_get_mstb()
1497  * drm_dp_mst_topology_get_mstb()
1498  */
1499 static void
1500 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1501 {
1502         DRM_DEBUG("mstb %p (%d)\n",
1503                   mstb, kref_read(&mstb->topology_kref) - 1);
1504         kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1505 }
1506
1507 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
1508 {
1509         struct drm_dp_mst_branch *mstb;
1510
1511         switch (old_pdt) {
1512         case DP_PEER_DEVICE_DP_LEGACY_CONV:
1513         case DP_PEER_DEVICE_SST_SINK:
1514                 /* remove i2c over sideband */
1515                 drm_dp_mst_unregister_i2c_bus(&port->aux);
1516                 break;
1517         case DP_PEER_DEVICE_MST_BRANCHING:
1518                 mstb = port->mstb;
1519                 port->mstb = NULL;
1520                 drm_dp_mst_topology_put_mstb(mstb);
1521                 break;
1522         }
1523 }
1524
1525 static void drm_dp_destroy_port(struct kref *kref)
1526 {
1527         struct drm_dp_mst_port *port =
1528                 container_of(kref, struct drm_dp_mst_port, topology_kref);
1529         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1530
1531         if (!port->input) {
1532                 kfree(port->cached_edid);
1533
1534                 /*
1535                  * The only time we don't have a connector
1536                  * on an output port is if the connector init
1537                  * fails.
1538                  */
1539                 if (port->connector) {
1540                         /* we can't destroy the connector here, as
1541                          * we might be holding the mode_config.mutex
1542                          * from an EDID retrieval */
1543
1544                         mutex_lock(&mgr->destroy_connector_lock);
1545                         list_add(&port->next, &mgr->destroy_connector_list);
1546                         mutex_unlock(&mgr->destroy_connector_lock);
1547                         schedule_work(&mgr->destroy_connector_work);
1548                         return;
1549                 }
1550                 /* no need to clean up the vcpi here, since without a
1551                  * connector we never set one up in the first place */
1552                 drm_dp_port_teardown_pdt(port, port->pdt);
1553                 port->pdt = DP_PEER_DEVICE_NONE;
1554         }
1555         drm_dp_mst_put_port_malloc(port);
1556 }
1557
1558 /**
1559  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1560  * port unless it's zero
1561  * @port: &struct drm_dp_mst_port to increment the topology refcount of
1562  *
1563  * Attempts to grab a topology reference to @port, if it hasn't yet been
1564  * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
1565  * 0). Holding a topology reference implies that a malloc reference will be
1566  * held to @port as long as the user holds the topology reference.
1567  *
1568  * Care should be taken to ensure that the user has at least one malloc
1569  * reference to @port. If you already have a topology reference to @port, you
1570  * should use drm_dp_mst_topology_get_port() instead.
1571  *
1572  * See also:
1573  * drm_dp_mst_topology_get_port()
1574  * drm_dp_mst_topology_put_port()
1575  *
1576  * Returns:
1577  * * 1: A topology reference was grabbed successfully
1578  * * 0: @port is no longer in the topology, no reference was grabbed
1579  */
1580 static int __must_check
1581 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1582 {
1583         int ret = kref_get_unless_zero(&port->topology_kref);
1584
1585         if (ret)
1586                 DRM_DEBUG("port %p (%d)\n", port,
1587                           kref_read(&port->topology_kref));
1588
1589         return ret;
1590 }
1591
1592 /**
1593  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1594  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1595  *
1596  * Increments &drm_dp_mst_port.topology_kref without checking whether or
1597  * not it's already reached 0. This is only valid to use in scenarios where
1598  * you are already guaranteed to have at least one active topology reference
1599  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1600  *
1601  * See also:
1602  * drm_dp_mst_topology_try_get_port()
1603  * drm_dp_mst_topology_put_port()
1604  */
1605 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1606 {
1607         WARN_ON(kref_read(&port->topology_kref) == 0);
1608         kref_get(&port->topology_kref);
1609         DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1610 }
1611
1612 /**
1613  * drm_dp_mst_topology_put_port() - release a topology reference to a port
1614  * @port: The &struct drm_dp_mst_port to release the topology reference from
1615  *
1616  * Releases a topology reference from @port by decrementing
1617  * &drm_dp_mst_port.topology_kref.
1618  *
1619  * See also:
1620  * drm_dp_mst_topology_try_get_port()
1621  * drm_dp_mst_topology_get_port()
1622  */
1623 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1624 {
1625         DRM_DEBUG("port %p (%d)\n",
1626                   port, kref_read(&port->topology_kref) - 1);
1627         kref_put(&port->topology_kref, drm_dp_destroy_port);
1628 }
1629
1630 static struct drm_dp_mst_branch *
1631 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1632                                               struct drm_dp_mst_branch *to_find)
1633 {
1634         struct drm_dp_mst_port *port;
1635         struct drm_dp_mst_branch *rmstb;
1636
1637         if (to_find == mstb)
1638                 return mstb;
1639
1640         list_for_each_entry(port, &mstb->ports, next) {
1641                 if (port->mstb) {
1642                         rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1643                             port->mstb, to_find);
1644                         if (rmstb)
1645                                 return rmstb;
1646                 }
1647         }
1648         return NULL;
1649 }
1650
1651 static struct drm_dp_mst_branch *
1652 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1653                                        struct drm_dp_mst_branch *mstb)
1654 {
1655         struct drm_dp_mst_branch *rmstb = NULL;
1656
1657         mutex_lock(&mgr->lock);
1658         if (mgr->mst_primary) {
1659                 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1660                     mgr->mst_primary, mstb);
1661
1662                 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1663                         rmstb = NULL;
1664         }
1665         mutex_unlock(&mgr->lock);
1666         return rmstb;
1667 }
1668
1669 static struct drm_dp_mst_port *
1670 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1671                                               struct drm_dp_mst_port *to_find)
1672 {
1673         struct drm_dp_mst_port *port, *mport;
1674
1675         list_for_each_entry(port, &mstb->ports, next) {
1676                 if (port == to_find)
1677                         return port;
1678
1679                 if (port->mstb) {
1680                         mport = drm_dp_mst_topology_get_port_validated_locked(
1681                             port->mstb, to_find);
1682                         if (mport)
1683                                 return mport;
1684                 }
1685         }
1686         return NULL;
1687 }
1688
1689 static struct drm_dp_mst_port *
1690 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1691                                        struct drm_dp_mst_port *port)
1692 {
1693         struct drm_dp_mst_port *rport = NULL;
1694
1695         mutex_lock(&mgr->lock);
1696         if (mgr->mst_primary) {
1697                 rport = drm_dp_mst_topology_get_port_validated_locked(
1698                     mgr->mst_primary, port);
1699
1700                 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1701                         rport = NULL;
1702         }
1703         mutex_unlock(&mgr->lock);
1704         return rport;
1705 }
1706
1707 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1708 {
1709         struct drm_dp_mst_port *port;
1710         int ret;
1711
1712         list_for_each_entry(port, &mstb->ports, next) {
1713                 if (port->port_num == port_num) {
1714                         ret = drm_dp_mst_topology_try_get_port(port);
1715                         return ret ? port : NULL;
1716                 }
1717         }
1718
1719         return NULL;
1720 }
1721
1722 /*
1723  * Calculate a new RAD (relative address) for this MST branch device.
1724  * If the parent has an LCT of 2 it has 1 nibble of RAD,
1725  * if the parent has an LCT of 3 it has 2 nibbles of RAD, and so on.
1726  */
1727 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1728                                  u8 *rad)
1729 {
1730         int parent_lct = port->parent->lct;
1731         int shift = 4;
1732         int idx = (parent_lct - 1) / 2;
1733         if (parent_lct > 1) {
1734                 memcpy(rad, port->parent->rad, idx + 1);
1735                 shift = (parent_lct % 2) ? 4 : 0;
1736         } else
1737                 rad[0] = 0;
1738
1739         rad[idx] |= port->port_num << shift;
1740         return parent_lct + 1;
1741 }
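
/*
 * Worked example (editorial addition): take a port with port_num 8 whose
 * parent branch has lct 2 and rad[0] 0x10, i.e. it sits behind port 1 of
 * the primary branch. Then idx = 0 and shift = 0, so the child's RAD
 * becomes 0x18 with a returned lct of 3 - decoded high nibble first, the
 * path reads primary -> port 1 -> port 8.
 */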
1742
1743 /*
1744  * Returns true if a new MSTB was set up and needs its link address sent
1745  */
1746 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1747 {
1748         int ret;
1749         u8 rad[6], lct;
1750         bool send_link = false;
1751         switch (port->pdt) {
1752         case DP_PEER_DEVICE_DP_LEGACY_CONV:
1753         case DP_PEER_DEVICE_SST_SINK:
1754                 /* add i2c over sideband */
1755                 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1756                 break;
1757         case DP_PEER_DEVICE_MST_BRANCHING:
1758                 lct = drm_dp_calculate_rad(port, rad);
1759
1760                 port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1761                 if (port->mstb) {
1762                         port->mstb->mgr = port->mgr;
1763                         port->mstb->port_parent = port;
1764                         /*
1765                          * Make sure this port's memory allocation stays
1766                          * around until its child MSTB releases it
1767                          */
1768                         drm_dp_mst_get_port_malloc(port);
1769
1770                         send_link = true;
1771                 }
1772                 break;
1773         }
1774         return send_link;
1775 }
1776
1777 /**
1778  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
1779  * @aux: Fake sideband AUX CH
1780  * @offset: address of the (first) register to read
1781  * @buffer: buffer to store the register values
1782  * @size: number of bytes in @buffer
1783  *
1784  * Performs the same functionality for remote devices via
1785  * sideband messaging as drm_dp_dpcd_read() does for local
1786  * devices via actual AUX CH.
1787  *
1788  * Return: Number of bytes read, or negative error code on failure.
1789  */
1790 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
1791                              unsigned int offset, void *buffer, size_t size)
1792 {
1793         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
1794                                                     aux);
1795
1796         return drm_dp_send_dpcd_read(port->mgr, port,
1797                                      offset, size, buffer);
1798 }
1799
1800 /**
1801  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
1802  * @aux: Fake sideband AUX CH
1803  * @offset: address of the (first) register to write
1804  * @buffer: buffer containing the values to write
1805  * @size: number of bytes in @buffer
1806  *
1807  * Performs the same functionality for remote devices via
1808  * sideband messaging as drm_dp_dpcd_write() does for local
1809  * devices via actual AUX CH.
1810  *
1811  * Return: 0 on success, negative error code on failure.
1812  */
1813 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
1814                               unsigned int offset, void *buffer, size_t size)
1815 {
1816         struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
1817                                                     aux);
1818
1819         return drm_dp_send_dpcd_write(port->mgr, port,
1820                                       offset, size, buffer);
1821 }
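
/*
 * Hedged usage sketch (editorial addition): since &drm_dp_mst_port.aux is
 * marked as remote when the port is created, callers can use the regular
 * DPCD accessors and have them routed through the two helpers above. The
 * register and buffer below are arbitrary example values:
 *
 *	u8 buf[DP_RECEIVER_CAP_SIZE];
 *	ssize_t ret;
 *
 *	// Reads the remote device's receiver caps via REMOTE_DPCD_READ
 *	// sideband messages.
 *	ret = drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, buf, sizeof(buf));
 *	if (ret < 0)
 *		return ret;
 */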
1822
1823 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1824 {
1825         int ret;
1826
1827         memcpy(mstb->guid, guid, 16);
1828
1829         if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1830                 if (mstb->port_parent) {
1831                         ret = drm_dp_send_dpcd_write(
1832                                         mstb->mgr,
1833                                         mstb->port_parent,
1834                                         DP_GUID,
1835                                         16,
1836                                         mstb->guid);
1837                 } else {
1838
1839                         ret = drm_dp_dpcd_write(
1840                                         mstb->mgr->aux,
1841                                         DP_GUID,
1842                                         mstb->guid,
1843                                         16);
1844                 }
1845         }
1846 }
1847
1848 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1849                                 int pnum,
1850                                 char *proppath,
1851                                 size_t proppath_size)
1852 {
1853         int i;
1854         char temp[8];
1855         snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1856         for (i = 0; i < (mstb->lct - 1); i++) {
1857                 int shift = (i % 2) ? 0 : 4;
1858                 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1859                 snprintf(temp, sizeof(temp), "-%d", port_num);
1860                 strlcat(proppath, temp, proppath_size);
1861         }
1862         snprintf(temp, sizeof(temp), "-%d", pnum);
1863         strlcat(proppath, temp, proppath_size);
1864 }
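
/*
 * Worked example (editorial addition): with conn_base_id 5 and an mstb at
 * lct 3 reached via port 1 and then port 8 (rad[0] == 0x18), a request
 * for pnum 2 yields the connector path property "mst:5-1-8-2".
 */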
1865
1866 /**
1867  * drm_dp_mst_connector_late_register() - Late MST connector registration
1868  * @connector: The MST connector
1869  * @port: The MST port for this connector
1870  *
1871  * Helper to register the remote aux device for this MST port. Drivers should
1872  * call this from their mst connector's late_register hook to enable MST aux
1873  * devices.
1874  *
1875  * Return: 0 on success, negative error code on failure.
1876  */
1877 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
1878                                        struct drm_dp_mst_port *port)
1879 {
1880         DRM_DEBUG_KMS("registering %s remote bus for %s\n",
1881                       port->aux.name, connector->kdev->kobj.name);
1882
1883         port->aux.dev = connector->kdev;
1884         return drm_dp_aux_register_devnode(&port->aux);
1885 }
1886 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
1887
1888 /**
1889  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
1890  * @connector: The MST connector
1891  * @port: The MST port for this connector
1892  *
1893  * Helper to unregister the remote aux device for this MST port, registered by
1894  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
1895  * connector's early_unregister hook.
1896  */
1897 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
1898                                            struct drm_dp_mst_port *port)
1899 {
1900         DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
1901                       port->aux.name, connector->kdev->kobj.name);
1902         drm_dp_aux_unregister_devnode(&port->aux);
1903 }
1904 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
1905
1906 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1907                             struct drm_device *dev,
1908                             struct drm_dp_link_addr_reply_port *port_msg)
1909 {
1910         struct drm_dp_mst_port *port;
1911         bool ret;
1912         bool created = false;
1913         int old_pdt = 0;
1914         int old_ddps = 0;
1915
1916         port = drm_dp_get_port(mstb, port_msg->port_number);
1917         if (!port) {
1918                 port = kzalloc(sizeof(*port), GFP_KERNEL);
1919                 if (!port)
1920                         return;
1921                 kref_init(&port->topology_kref);
1922                 kref_init(&port->malloc_kref);
1923                 port->parent = mstb;
1924                 port->port_num = port_msg->port_number;
1925                 port->mgr = mstb->mgr;
1926                 port->aux.name = "DPMST";
1927                 port->aux.dev = dev->dev;
1928                 port->aux.is_remote = true;
1929
1930                 /*
1931                  * Make sure the memory allocation for our parent branch stays
1932                  * around until our own memory allocation is released
1933                  */
1934                 drm_dp_mst_get_mstb_malloc(mstb);
1935
1936                 created = true;
1937         } else {
1938                 old_pdt = port->pdt;
1939                 old_ddps = port->ddps;
1940         }
1941
1942         port->pdt = port_msg->peer_device_type;
1943         port->input = port_msg->input_port;
1944         port->mcs = port_msg->mcs;
1945         port->ddps = port_msg->ddps;
1946         port->ldps = port_msg->legacy_device_plug_status;
1947         port->dpcd_rev = port_msg->dpcd_revision;
1948         port->num_sdp_streams = port_msg->num_sdp_streams;
1949         port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1950
1951         /* manage the mstb port list under the mgr lock, and take a
1952          * topology reference on behalf of the list */
1953         if (created) {
1954                 mutex_lock(&mstb->mgr->lock);
1955                 drm_dp_mst_topology_get_port(port);
1956                 list_add(&port->next, &mstb->ports);
1957                 mutex_unlock(&mstb->mgr->lock);
1958         }
1959
1960         if (old_ddps != port->ddps) {
1961                 if (port->ddps) {
1962                         if (!port->input) {
1963                                 drm_dp_send_enum_path_resources(mstb->mgr,
1964                                                                 mstb, port);
1965                         }
1966                 } else {
1967                         port->available_pbn = 0;
1968                 }
1969         }
1970
1971         if (old_pdt != port->pdt && !port->input) {
1972                 drm_dp_port_teardown_pdt(port, old_pdt);
1973
1974                 ret = drm_dp_port_setup_pdt(port);
1975                 if (ret)
1976                         drm_dp_send_link_address(mstb->mgr, port->mstb);
1977         }
1978
1979         if (created && !port->input) {
1980                 char proppath[255];
1981
1982                 build_mst_prop_path(mstb, port->port_num, proppath,
1983                                     sizeof(proppath));
1984                 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
1985                                                                    port,
1986                                                                    proppath);
1987                 if (!port->connector) {
1988                         /* remove it from the port list */
1989                         mutex_lock(&mstb->mgr->lock);
1990                         list_del(&port->next);
1991                         mutex_unlock(&mstb->mgr->lock);
1992                         /* drop port list reference */
1993                         drm_dp_mst_topology_put_port(port);
1994                         goto out;
1995                 }
1996                 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1997                      port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1998                     port->port_num >= DP_MST_LOGICAL_PORT_0) {
1999                         port->cached_edid = drm_get_edid(port->connector,
2000                                                          &port->aux.ddc);
2001                         drm_connector_set_tile_property(port->connector);
2002                 }
2003                 (*mstb->mgr->cbs->register_connector)(port->connector);
2004         }
2005
2006 out:
2007         /* put reference to this port */
2008         drm_dp_mst_topology_put_port(port);
2009 }
2010
2011 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
2012                                struct drm_dp_connection_status_notify *conn_stat)
2013 {
2014         struct drm_dp_mst_port *port;
2015         int old_pdt;
2016         int old_ddps;
2017         bool dowork = false;
2018         port = drm_dp_get_port(mstb, conn_stat->port_number);
2019         if (!port)
2020                 return;
2021
2022         old_ddps = port->ddps;
2023         old_pdt = port->pdt;
2024         port->pdt = conn_stat->peer_device_type;
2025         port->mcs = conn_stat->message_capability_status;
2026         port->ldps = conn_stat->legacy_device_plug_status;
2027         port->ddps = conn_stat->displayport_device_plug_status;
2028
2029         if (old_ddps != port->ddps) {
2030                 if (port->ddps) {
2031                         dowork = true;
2032                 } else {
2033                         port->available_pbn = 0;
2034                 }
2035         }
2036         if (old_pdt != port->pdt && !port->input) {
2037                 drm_dp_port_teardown_pdt(port, old_pdt);
2038
2039                 if (drm_dp_port_setup_pdt(port))
2040                         dowork = true;
2041         }
2042
2043         drm_dp_mst_topology_put_port(port);
2044         if (dowork)
2045                 queue_work(system_long_wq, &mstb->mgr->work);
2046
2047 }
2048
2049 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2050                                                                u8 lct, u8 *rad)
2051 {
2052         struct drm_dp_mst_branch *mstb;
2053         struct drm_dp_mst_port *port;
2054         int i, ret;
2055         /* find the port by iterating down */
2056
2057         mutex_lock(&mgr->lock);
2058         mstb = mgr->mst_primary;
2059
2060         if (!mstb)
2061                 goto out;
2062
2063         for (i = 0; i < lct - 1; i++) {
2064                 int shift = (i % 2) ? 0 : 4;
2065                 int port_num = (rad[i / 2] >> shift) & 0xf;
2066
2067                 list_for_each_entry(port, &mstb->ports, next) {
2068                         if (port->port_num == port_num) {
2069                                 mstb = port->mstb;
2070                                 if (!mstb) {
2071                                         DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2072                                         goto out;
2073                                 }
2074
2075                                 break;
2076                         }
2077                 }
2078         }
2079         ret = drm_dp_mst_topology_try_get_mstb(mstb);
2080         if (!ret)
2081                 mstb = NULL;
2082 out:
2083         mutex_unlock(&mgr->lock);
2084         return mstb;
2085 }
2086
2087 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2088         struct drm_dp_mst_branch *mstb,
2089         const uint8_t *guid)
2090 {
2091         struct drm_dp_mst_branch *found_mstb;
2092         struct drm_dp_mst_port *port;
2093
2094         if (!mstb || memcmp(mstb->guid, guid, 16) == 0)
2095                 return mstb;
2096
2097
2098         list_for_each_entry(port, &mstb->ports, next) {
2099                 if (!port->mstb)
2100                         continue;
2101
2102                 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2103
2104                 if (found_mstb)
2105                         return found_mstb;
2106         }
2107
2108         return NULL;
2109 }
2110
2111 static struct drm_dp_mst_branch *
2112 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2113                                      const uint8_t *guid)
2114 {
2115         struct drm_dp_mst_branch *mstb;
2116         int ret;
2117
2118         /* find the port by iterating down */
2119         mutex_lock(&mgr->lock);
2120
2121         mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2122         if (mstb) {
2123                 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2124                 if (!ret)
2125                         mstb = NULL;
2126         }
2127
2128         mutex_unlock(&mgr->lock);
2129         return mstb;
2130 }
2131
2132 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2133                                                struct drm_dp_mst_branch *mstb)
2134 {
2135         struct drm_dp_mst_port *port;
2136         struct drm_dp_mst_branch *mstb_child;
2137         if (!mstb->link_address_sent)
2138                 drm_dp_send_link_address(mgr, mstb);
2139
2140         list_for_each_entry(port, &mstb->ports, next) {
2141                 if (port->input)
2142                         continue;
2143
2144                 if (!port->ddps)
2145                         continue;
2146
2147                 if (!port->available_pbn)
2148                         drm_dp_send_enum_path_resources(mgr, mstb, port);
2149
2150                 if (port->mstb) {
2151                         mstb_child = drm_dp_mst_topology_get_mstb_validated(
2152                             mgr, port->mstb);
2153                         if (mstb_child) {
2154                                 drm_dp_check_and_send_link_address(mgr, mstb_child);
2155                                 drm_dp_mst_topology_put_mstb(mstb_child);
2156                         }
2157                 }
2158         }
2159 }
2160
2161 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2162 {
2163         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
2164         struct drm_dp_mst_branch *mstb;
2165         int ret;
2166
2167         mutex_lock(&mgr->lock);
2168         mstb = mgr->mst_primary;
2169         if (mstb) {
2170                 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2171                 if (!ret)
2172                         mstb = NULL;
2173         }
2174         mutex_unlock(&mgr->lock);
2175         if (mstb) {
2176                 drm_dp_check_and_send_link_address(mgr, mstb);
2177                 drm_dp_mst_topology_put_mstb(mstb);
2178         }
2179 }
2180
2181 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2182                                  u8 *guid)
2183 {
2184         u64 salt;
2185
2186         if (memchr_inv(guid, 0, 16))
2187                 return true;
2188
2189         salt = get_jiffies_64();
2190
2191         memcpy(&guid[0], &salt, sizeof(u64));
2192         memcpy(&guid[8], &salt, sizeof(u64));
2193
2194         return false;
2195 }
2196
2197 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2198 {
2199         struct drm_dp_sideband_msg_req_body req;
2200
2201         req.req_type = DP_REMOTE_DPCD_READ;
2202         req.u.dpcd_read.port_number = port_num;
2203         req.u.dpcd_read.dpcd_address = offset;
2204         req.u.dpcd_read.num_bytes = num_bytes;
2205         drm_dp_encode_sideband_req(&req, msg);
2206
2207         return 0;
2208 }
2209
2210 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2211                                     bool up, u8 *msg, int len)
2212 {
2213         int ret;
2214         int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2215         int tosend, total, offset;
2216         int retries = 0;
2217
2218 retry:
2219         total = len;
2220         offset = 0;
2221         do {
2222                 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2223
2224                 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2225                                         &msg[offset],
2226                                         tosend);
2227                 if (ret != tosend) {
2228                         if (ret == -EIO && retries < 5) {
2229                                 retries++;
2230                                 goto retry;
2231                         }
2232                         DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2233
2234                         return -EIO;
2235                 }
2236                 offset += tosend;
2237                 total -= tosend;
2238         } while (total > 0);
2239         return 0;
2240 }
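
/*
 * Worked example (editorial addition): with max_dpcd_transaction_bytes of
 * 16 (or anything larger, since tosend is also clamped to 16) a 20 byte
 * chunk goes out as two DPCD writes of 16 and 4 bytes, at regbase + 0 and
 * regbase + 16 respectively.
 */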
2241
2242 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2243                                   struct drm_dp_sideband_msg_tx *txmsg)
2244 {
2245         struct drm_dp_mst_branch *mstb = txmsg->dst;
2246         u8 req_type;
2247
2248         /* no seqno assigned yet - find a free tx slot, if any */
2249         if (txmsg->seqno == -1) {
2250                 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2251                         DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2252                         return -EAGAIN;
2253                 }
2254                 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2255                         txmsg->seqno = mstb->last_seqno;
2256                         mstb->last_seqno ^= 1;
2257                 } else if (mstb->tx_slots[0] == NULL)
2258                         txmsg->seqno = 0;
2259                 else
2260                         txmsg->seqno = 1;
2261                 mstb->tx_slots[txmsg->seqno] = txmsg;
2262         }
2263
2264         req_type = txmsg->msg[0] & 0x7f;
2265         if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2266                 req_type == DP_RESOURCE_STATUS_NOTIFY)
2267                 hdr->broadcast = 1;
2268         else
2269                 hdr->broadcast = 0;
2270         hdr->path_msg = txmsg->path_msg;
2271         hdr->lct = mstb->lct;
2272         hdr->lcr = mstb->lct - 1;
2273         if (mstb->lct > 1)
2274                 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2275         hdr->seqno = txmsg->seqno;
2276         return 0;
2277 }
2278 /*
2279  * process a single block of the next message in the sideband queue
2280  */
2281 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2282                                    struct drm_dp_sideband_msg_tx *txmsg,
2283                                    bool up)
2284 {
2285         u8 chunk[48];
2286         struct drm_dp_sideband_msg_hdr hdr;
2287         int len, space, idx, tosend;
2288         int ret;
2289
2290         memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2291
2292         if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2293                 txmsg->seqno = -1;
2294                 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2295         }
2296
2297         /* make hdr from dst mst - replies reuse the existing seqno,
2298          * otherwise assign one */
2299         ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2300         if (ret < 0)
2301                 return ret;
2302
2303         /* amount left to send in this message */
2304         len = txmsg->cur_len - txmsg->cur_offset;
2305
2306         /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2307         space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2308
2309         tosend = min(len, space);
2310         if (len == txmsg->cur_len)
2311                 hdr.somt = 1;
2312         if (space >= len)
2313                 hdr.eomt = 1;
2314
2315
2316         hdr.msg_len = tosend + 1;
2317         drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2318         memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2319         /* add crc at end */
2320         drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2321         idx += tosend + 1;
2322
2323         ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2324         if (unlikely(ret && drm_debug & DRM_UT_DP)) {
2325                 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2326
2327                 drm_printf(&p, "sideband msg failed to send\n");
2328                 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2329                 return ret;
2330         }
2331
2332         txmsg->cur_offset += tosend;
2333         if (txmsg->cur_offset == txmsg->cur_len) {
2334                 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2335                 return 1;
2336         }
2337         return 0;
2338 }
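
/*
 * Worked example (editorial addition, assuming drm_dp_calc_sb_hdr_size()
 * yields 3 bytes for an lct of 1): space = 48 - 1 - 3 = 44, so a 60 byte
 * message to the primary branch goes out as a 44 byte chunk with somt set
 * followed by a 16 byte chunk with eomt set.
 */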
2339
2340 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2341 {
2342         struct drm_dp_sideband_msg_tx *txmsg;
2343         int ret;
2344
2345         WARN_ON(!mutex_is_locked(&mgr->qlock));
2346
2347         /* construct a chunk from the first msg in the tx_msg queue */
2348         if (list_empty(&mgr->tx_msg_downq))
2349                 return;
2350
2351         txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2352         ret = process_single_tx_qlock(mgr, txmsg, false);
2353         if (ret == 1) {
2354                 /* txmsg is sent; it should be in the slots now */
2355                 list_del(&txmsg->next);
2356         } else if (ret) {
2357                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2358                 list_del(&txmsg->next);
2359                 if (txmsg->seqno != -1)
2360                         txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2361                 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2362                 wake_up_all(&mgr->tx_waitq);
2363         }
2364 }
2365
2366 /* called holding qlock */
2367 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2368                                        struct drm_dp_sideband_msg_tx *txmsg)
2369 {
2370         int ret;
2371
2372         /* construct a chunk from the msg we were given (not from a queue) */
2373         ret = process_single_tx_qlock(mgr, txmsg, true);
2374
2375         if (ret != 1)
2376                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2377
2378         if (txmsg->seqno != -1) {
2379                 WARN_ON((unsigned int)txmsg->seqno >=
2380                         ARRAY_SIZE(txmsg->dst->tx_slots));
2381                 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2382         }
2383 }
2384
2385 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2386                                  struct drm_dp_sideband_msg_tx *txmsg)
2387 {
2388         mutex_lock(&mgr->qlock);
2389         list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2390
2391         if (unlikely(drm_debug & DRM_UT_DP)) {
2392                 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2393
2394                 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2395         }
2396
2397         if (list_is_singular(&mgr->tx_msg_downq))
2398                 process_single_down_tx_qlock(mgr);
2399         mutex_unlock(&mgr->qlock);
2400 }
2401
2402 static void
2403 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2404 {
2405         struct drm_dp_link_addr_reply_port *port_reply;
2406         int i;
2407
2408         for (i = 0; i < reply->nports; i++) {
2409                 port_reply = &reply->ports[i];
2410                 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2411                               i,
2412                               port_reply->input_port,
2413                               port_reply->peer_device_type,
2414                               port_reply->port_number,
2415                               port_reply->dpcd_revision,
2416                               port_reply->mcs,
2417                               port_reply->ddps,
2418                               port_reply->legacy_device_plug_status,
2419                               port_reply->num_sdp_streams,
2420                               port_reply->num_sdp_stream_sinks);
2421         }
2422 }
2423
2424 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2425                                      struct drm_dp_mst_branch *mstb)
2426 {
2427         int len;
2428         struct drm_dp_sideband_msg_tx *txmsg;
2429         int ret;
2430
2431         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2432         if (!txmsg)
2433                 return;
2434
2435         txmsg->dst = mstb;
2436         len = build_link_address(txmsg);
2437
2438         mstb->link_address_sent = true;
2439         drm_dp_queue_down_tx(mgr, txmsg);
2440
2441         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2442         if (ret > 0) {
2443                 int i;
2444
2445                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2446                         DRM_DEBUG_KMS("link address nak received\n");
2447                 } else {
2448                         DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
2449                         drm_dp_dump_link_address(&txmsg->reply.u.link_addr);
2450
2451                         drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
2452
2453                         for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
2454                                 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
2455                         }
2456                         drm_kms_helper_hotplug_event(mgr->dev);
2457                 }
2458         } else {
2459                 mstb->link_address_sent = false;
2460                 DRM_DEBUG_KMS("link address failed %d\n", ret);
2461         }
2462
2463         kfree(txmsg);
2464 }
2465
2466 static int
2467 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2468                                 struct drm_dp_mst_branch *mstb,
2469                                 struct drm_dp_mst_port *port)
2470 {
2471         struct drm_dp_enum_path_resources_ack_reply *path_res;
2472         struct drm_dp_sideband_msg_tx *txmsg;
2473         int len;
2474         int ret;
2475
2476         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2477         if (!txmsg)
2478                 return -ENOMEM;
2479
2480         txmsg->dst = mstb;
2481         len = build_enum_path_resources(txmsg, port->port_num);
2482
2483         drm_dp_queue_down_tx(mgr, txmsg);
2484
2485         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2486         if (ret > 0) {
2487                 path_res = &txmsg->reply.u.path_resources;
2488
2489                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2490                         DRM_DEBUG_KMS("enum path resources nak received\n");
2491                 } else {
2492                         if (port->port_num != path_res->port_number)
2493                                 DRM_ERROR("got incorrect port in response\n");
2494
2495                         DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2496                                       path_res->port_number,
2497                                       path_res->full_payload_bw_number,
2498                                       path_res->avail_payload_bw_number);
2499                         port->available_pbn =
2500                                 path_res->avail_payload_bw_number;
2501                 }
2502         }
2503
2504         kfree(txmsg);
2505         return 0;
2506 }
2507
2508 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2509 {
2510         if (!mstb->port_parent)
2511                 return NULL;
2512
2513         if (mstb->port_parent->mstb != mstb)
2514                 return mstb->port_parent;
2515
2516         return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2517 }
2518
2519 /*
2520  * Searches upwards in the topology starting from mstb to try to find the
2521  * closest available parent of mstb that's still connected to the rest of the
2522  * topology. This can be used in order to perform operations like releasing
2523  * payloads, where the branch device which owned the payload may no longer be
2524  * around and thus would require that the payload on the last living relative
2525  * be freed instead.
2526  */
2527 static struct drm_dp_mst_branch *
2528 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2529                                         struct drm_dp_mst_branch *mstb,
2530                                         int *port_num)
2531 {
2532         struct drm_dp_mst_branch *rmstb = NULL;
2533         struct drm_dp_mst_port *found_port;
2534
2535         mutex_lock(&mgr->lock);
2536         if (!mgr->mst_primary)
2537                 goto out;
2538
2539         do {
2540                 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2541                 if (!found_port)
2542                         break;
2543
2544                 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2545                         rmstb = found_port->parent;
2546                         *port_num = found_port->port_num;
2547                 } else {
2548                         /* Search again, starting from this parent */
2549                         mstb = found_port->parent;
2550                 }
2551         } while (!rmstb);
2552 out:
2553         mutex_unlock(&mgr->lock);
2554         return rmstb;
2555 }
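
/*
 * Editorial note tying this back to the DOC comment near the top of the
 * file: this is the search used when releasing a payload whose port has
 * already left the topology - it resolves to the closest still-connected
 * ancestor branch plus the number of the port leading toward the dead
 * subtree, so the payload can be freed via that relative instead.
 */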
2556
2557 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2558                                    struct drm_dp_mst_port *port,
2559                                    int id,
2560                                    int pbn)
2561 {
2562         struct drm_dp_sideband_msg_tx *txmsg;
2563         struct drm_dp_mst_branch *mstb;
2564         int len, ret, port_num;
2565         u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2566         int i;
2567
2568         port_num = port->port_num;
2569         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2570         if (!mstb) {
2571                 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2572                                                                port->parent,
2573                                                                &port_num);
2574
2575                 if (!mstb)
2576                         return -EINVAL;
2577         }
2578
2579         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2580         if (!txmsg) {
2581                 ret = -ENOMEM;
2582                 goto fail_put;
2583         }
2584
2585         for (i = 0; i < port->num_sdp_streams; i++)
2586                 sinks[i] = i;
2587
2588         txmsg->dst = mstb;
2589         len = build_allocate_payload(txmsg, port_num,
2590                                      id,
2591                                      pbn, port->num_sdp_streams, sinks);
2592
2593         drm_dp_queue_down_tx(mgr, txmsg);
2594
2595         /*
2596          * FIXME: there is a small chance that between getting the last
2597          * connected mstb and sending the payload message, the last connected
2598          * mstb could also be removed from the topology. In the future, this
2599          * needs to be fixed by restarting the
2600          * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2601          * timeout if the topology is still connected to the system.
2602          */
2603         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2604         if (ret > 0) {
2605                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2606                         ret = -EINVAL;
2607                 else
2608                         ret = 0;
2609         }
2610         kfree(txmsg);
2611 fail_put:
2612         drm_dp_mst_topology_put_mstb(mstb);
2613         return ret;
2614 }
2615
2616 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2617                                  struct drm_dp_mst_port *port, bool power_up)
2618 {
2619         struct drm_dp_sideband_msg_tx *txmsg;
2620         int len, ret;
2621
2622         port = drm_dp_mst_topology_get_port_validated(mgr, port);
2623         if (!port)
2624                 return -EINVAL;
2625
2626         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2627         if (!txmsg) {
2628                 drm_dp_mst_topology_put_port(port);
2629                 return -ENOMEM;
2630         }
2631
2632         txmsg->dst = port->parent;
2633         len = build_power_updown_phy(txmsg, port->port_num, power_up);
2634         drm_dp_queue_down_tx(mgr, txmsg);
2635
2636         ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2637         if (ret > 0) {
2638                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2639                         ret = -EINVAL;
2640                 else
2641                         ret = 0;
2642         }
2643         kfree(txmsg);
2644         drm_dp_mst_topology_put_port(port);
2645
2646         return ret;
2647 }
2648 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2649
2650 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2651                                        int id,
2652                                        struct drm_dp_payload *payload)
2653 {
2654         int ret;
2655
2656         ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2657         if (ret < 0) {
2658                 payload->payload_state = 0;
2659                 return ret;
2660         }
2661         payload->payload_state = DP_PAYLOAD_LOCAL;
2662         return 0;
2663 }
2664
2665 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2666                                        struct drm_dp_mst_port *port,
2667                                        int id,
2668                                        struct drm_dp_payload *payload)
2669 {
2670         int ret;
2671         ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2672         if (ret < 0)
2673                 return ret;
2674         payload->payload_state = DP_PAYLOAD_REMOTE;
2675         return ret;
2676 }
2677
2678 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2679                                         struct drm_dp_mst_port *port,
2680                                         int id,
2681                                         struct drm_dp_payload *payload)
2682 {
2683         DRM_DEBUG_KMS("\n");
2684         /* it's okay for these to fail */
2685         if (port) {
2686                 drm_dp_payload_send_msg(mgr, port, id, 0);
2687         }
2688
2689         drm_dp_dpcd_write_payload(mgr, id, payload);
2690         payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2691         return 0;
2692 }
2693
2694 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2695                                         int id,
2696                                         struct drm_dp_payload *payload)
2697 {
2698         payload->payload_state = 0;
2699         return 0;
2700 }
2701
2702 /**
2703  * drm_dp_update_payload_part1() - Execute payload update part 1
2704  * @mgr: manager to use.
2705  *
2706  * This iterates over all proposed virtual channels, and tries to
2707  * allocate space in the link for them. For 0->slots transitions,
2708  * this step just writes the VCPI to the MST device. For slots->0
2709  * transitions, this writes the updated VCPIs and removes the
2710  * remote VC payloads.
2711  *
2712  * After calling this, the driver should generate ACT and payload
2713  * packets.
2714  */
2715 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2716 {
2717         struct drm_dp_payload req_payload;
2718         struct drm_dp_mst_port *port;
2719         int i, j;
2720         int cur_slots = 1;
2721
2722         mutex_lock(&mgr->payload_lock);
2723         for (i = 0; i < mgr->max_payloads; i++) {
2724                 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2725                 struct drm_dp_payload *payload = &mgr->payloads[i];
2726                 bool put_port = false;
2727
2728                 /* solve the current payloads - compare to the hw ones
2729                  * and update the hw view */
2730                 req_payload.start_slot = cur_slots;
2731                 if (vcpi) {
2732                         port = container_of(vcpi, struct drm_dp_mst_port,
2733                                             vcpi);
2734
2735                         /* Validated ports don't matter if we're releasing
2736                          * VCPI
2737                          */
2738                         if (vcpi->num_slots) {
2739                                 port = drm_dp_mst_topology_get_port_validated(
2740                                     mgr, port);
2741                                 if (!port) {
2742                                         mutex_unlock(&mgr->payload_lock);
2743                                         return -EINVAL;
2744                                 }
2745                                 put_port = true;
2746                         }
2747
2748                         req_payload.num_slots = vcpi->num_slots;
2749                         req_payload.vcpi = vcpi->vcpi;
2750                 } else {
2751                         port = NULL;
2752                         req_payload.num_slots = 0;
2753                 }
2754
2755                 payload->start_slot = req_payload.start_slot;
2756                 /* work out what is required to happen with this payload */
2757                 if (payload->num_slots != req_payload.num_slots) {
2758
2759                         /* need to push an update for this payload */
2760                         if (req_payload.num_slots) {
2761                                 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2762                                                             &req_payload);
2763                                 payload->num_slots = req_payload.num_slots;
2764                                 payload->vcpi = req_payload.vcpi;
2765
2766                         } else if (payload->num_slots) {
2767                                 payload->num_slots = 0;
2768                                 drm_dp_destroy_payload_step1(mgr, port,
2769                                                              payload->vcpi,
2770                                                              payload);
2771                                 req_payload.payload_state =
2772                                         payload->payload_state;
2773                                 payload->start_slot = 0;
2774                         }
2775                         payload->payload_state = req_payload.payload_state;
2776                 }
2777                 cur_slots += req_payload.num_slots;
2778
2779                 if (put_port)
2780                         drm_dp_mst_topology_put_port(port);
2781         }
2782
2783         for (i = 0; i < mgr->max_payloads; i++) {
2784                 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
2785                         continue;
2786
2787                 DRM_DEBUG_KMS("removing payload %d\n", i);
2788                 for (j = i; j < mgr->max_payloads - 1; j++) {
2789                         mgr->payloads[j] = mgr->payloads[j + 1];
2790                         mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2791
2792                         if (mgr->proposed_vcpis[j] &&
2793                             mgr->proposed_vcpis[j]->num_slots) {
2794                                 set_bit(j + 1, &mgr->payload_mask);
2795                         } else {
2796                                 clear_bit(j + 1, &mgr->payload_mask);
2797                         }
2798                 }
2799
2800                 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2801                        sizeof(struct drm_dp_payload));
2802                 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2803                 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2804         }
2805         mutex_unlock(&mgr->payload_lock);
2806
2807         return 0;
2808 }
2809 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2810
2811 /**
2812  * drm_dp_update_payload_part2() - Execute payload update part 2
2813  * @mgr: manager to use.
2814  *
2815  * This iterates over all proposed virtual channels, and tries to
2816  * allocate space in the link for them. For 0->slots transitions,
2817  * this step writes the remote VC payload commands. For slots->0
2818  * this just resets some internal state.
2819  */
2820 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2821 {
2822         struct drm_dp_mst_port *port;
2823         int i;
	int ret = 0;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		if (!mgr->proposed_vcpis[i])
			continue;
2830
		port = container_of(mgr->proposed_vcpis[i],
				    struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i,
			      mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port,
							  mgr->proposed_vcpis[i]->vcpi,
							  &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr,
							   mgr->proposed_vcpis[i]->vcpi,
							   &mgr->payloads[i]);
		}
2839                 if (ret) {
2840                         mutex_unlock(&mgr->payload_lock);
2841                         return ret;
2842                 }
2843         }
2844         mutex_unlock(&mgr->payload_lock);
2845         return 0;
2846 }
2847 EXPORT_SYMBOL(drm_dp_update_payload_part2);
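
/*
 * A minimal, illustrative sketch (not part of the original file) of how a
 * driver is expected to sequence the two payload-update steps around ACT,
 * as the kernel-doc above describes; the function name is hypothetical and
 * error handling is elided.
 */
static void __maybe_unused
example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	/* step 1: write the VCPI allocations to the branch device */
	drm_dp_update_payload_part1(mgr);

	/* the driver triggers ACT in its own hardware here, then polls it */
	drm_dp_check_act_status(mgr);

	/* step 2: send the remote ALLOCATE_PAYLOAD sideband messages */
	drm_dp_update_payload_part2(mgr);
}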
2848
2849 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2850                                  struct drm_dp_mst_port *port,
2851                                  int offset, int size, u8 *bytes)
2852 {
	int ret = 0;
2855         struct drm_dp_sideband_msg_tx *txmsg;
2856         struct drm_dp_mst_branch *mstb;
2857
2858         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2859         if (!mstb)
2860                 return -EINVAL;
2861
2862         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2863         if (!txmsg) {
2864                 ret = -ENOMEM;
2865                 goto fail_put;
2866         }
2867
	build_dpcd_read(txmsg, port->port_num, offset, size);
	txmsg->dst = mstb;
2870
2871         drm_dp_queue_down_tx(mgr, txmsg);
2872
2873         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2874         if (ret < 0)
2875                 goto fail_free;
2876
2877         /* DPCD read should never be NACKed */
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2879                 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
2880                           mstb, port->port_num, offset, size);
2881                 ret = -EIO;
2882                 goto fail_free;
2883         }
2884
	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
		ret = -EPROTO;
		goto fail_free;
	}

	/* num_bytes == size is guaranteed by the check above */
	ret = size;
	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, size);
2893
2894 fail_free:
2895         kfree(txmsg);
2896 fail_put:
2897         drm_dp_mst_topology_put_mstb(mstb);
2898
2899         return ret;
2900 }
2901
2902 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2903                                   struct drm_dp_mst_port *port,
2904                                   int offset, int size, u8 *bytes)
2905 {
2907         int ret;
2908         struct drm_dp_sideband_msg_tx *txmsg;
2909         struct drm_dp_mst_branch *mstb;
2910
2911         mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2912         if (!mstb)
2913                 return -EINVAL;
2914
2915         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2916         if (!txmsg) {
2917                 ret = -ENOMEM;
2918                 goto fail_put;
2919         }
2920
	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2922         txmsg->dst = mstb;
2923
2924         drm_dp_queue_down_tx(mgr, txmsg);
2925
2926         ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2927         if (ret > 0) {
2928                 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2929                         ret = -EIO;
2930                 else
2931                         ret = 0;
2932         }
2933         kfree(txmsg);
2934 fail_put:
2935         drm_dp_mst_topology_put_mstb(mstb);
2936         return ret;
2937 }
2938
2939 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2940 {
2941         struct drm_dp_sideband_msg_reply_body reply;
2942
2943         reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2944         reply.req_type = req_type;
2945         drm_dp_encode_sideband_reply(&reply, msg);
2946         return 0;
2947 }
2948
2949 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2950                                     struct drm_dp_mst_branch *mstb,
2951                                     int req_type, int seqno, bool broadcast)
2952 {
2953         struct drm_dp_sideband_msg_tx *txmsg;
2954
2955         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2956         if (!txmsg)
2957                 return -ENOMEM;
2958
2959         txmsg->dst = mstb;
2960         txmsg->seqno = seqno;
2961         drm_dp_encode_up_ack_reply(txmsg, req_type);
2962
2963         mutex_lock(&mgr->qlock);
2964
2965         process_single_up_tx_qlock(mgr, txmsg);
2966
2967         mutex_unlock(&mgr->qlock);
2968
2969         kfree(txmsg);
2970         return 0;
2971 }
2972
2973 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2974                                      int dp_link_count,
2975                                      int *out)
2976 {
2977         switch (dp_link_bw) {
2978         default:
2979                 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2980                               dp_link_bw, dp_link_count);
2981                 return false;
2982
2983         case DP_LINK_BW_1_62:
2984                 *out = 3 * dp_link_count;
2985                 break;
2986         case DP_LINK_BW_2_7:
2987                 *out = 5 * dp_link_count;
2988                 break;
2989         case DP_LINK_BW_5_4:
2990                 *out = 10 * dp_link_count;
2991                 break;
2992         case DP_LINK_BW_8_1:
2993                 *out = 15 * dp_link_count;
2994                 break;
2995         }
2996         return true;
2997 }
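
/*
 * Worked example (informative): an HBR2 link (DP_LINK_BW_5_4) with 4 lanes
 * yields *out = 10 * 4 = 40, i.e. each of the 63 usable MTP time slots
 * carries 40 PBN of bandwidth; the result becomes mgr->pbn_div below.
 */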
2998
2999 /**
3000  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3001  * @mgr: manager to set state for
3002  * @mst_state: true to enable MST on this connector - false to disable.
3003  *
3004  * This is called by the driver when it detects an MST capable device plugged
3005  * into a DP MST capable port, or when a DP MST capable device is unplugged.
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 *
 * RETURNS: 0 on success, negative error code on failure.
 */
3007 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3008 {
3009         int ret = 0;
3010         struct drm_dp_mst_branch *mstb = NULL;
3011
3012         mutex_lock(&mgr->lock);
3013         if (mst_state == mgr->mst_state)
3014                 goto out_unlock;
3015
3016         mgr->mst_state = mst_state;
3017         /* set the device into MST mode */
3018         if (mst_state) {
3019                 WARN_ON(mgr->mst_primary);
3020
3021                 /* get dpcd info */
3022                 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			/* don't return a short positive read count as success */
			if (ret >= 0)
				ret = -EIO;
			goto out_unlock;
		}
3027
		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[DP_MAX_LINK_RATE],
					      mgr->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK,
					      &mgr->pbn_div)) {
3031                         ret = -EINVAL;
3032                         goto out_unlock;
3033                 }
3034
3035                 /* add initial branch device at LCT 1 */
3036                 mstb = drm_dp_add_mst_branch_device(1, NULL);
3037                 if (mstb == NULL) {
3038                         ret = -ENOMEM;
3039                         goto out_unlock;
3040                 }
3041                 mstb->mgr = mgr;
3042
3043                 /* give this the main reference */
3044                 mgr->mst_primary = mstb;
3045                 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3046
		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN |
					 DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;
3052
		{
			struct drm_dp_payload reset_pay;

			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}
3059
3060                 queue_work(system_long_wq, &mgr->work);
3061
3062                 ret = 0;
3063         } else {
3064                 /* disable MST on the device */
3065                 mstb = mgr->mst_primary;
3066                 mgr->mst_primary = NULL;
3067                 /* this can fail if the device is gone */
3068                 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3069                 ret = 0;
3070                 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3071                 mgr->payload_mask = 0;
3072                 set_bit(0, &mgr->payload_mask);
3073                 mgr->vcpi_mask = 0;
3074         }
3075
3076 out_unlock:
3077         mutex_unlock(&mgr->lock);
3078         if (mstb)
3079                 drm_dp_mst_topology_put_mstb(mstb);
3080         return ret;
}
3083 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
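
/*
 * Illustrative sketch of the long-pulse hotplug handling described above;
 * how the driver detects "plugged" and MST capability is hardware specific,
 * and the function name is hypothetical.
 */
static void __maybe_unused
example_long_hpd(struct drm_dp_mst_topology_mgr *mgr, bool plugged,
		 bool sink_can_mst)
{
	/* enabling MST kicks off the topology probe via mgr->work */
	drm_dp_mst_topology_mgr_set_mst(mgr, plugged && sink_can_mst);
}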
3084
3085 /**
3086  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3087  * @mgr: manager to suspend
3088  *
3089  * This function tells the MST device that we can't handle UP messages
3090  * anymore. This should stop it from sending any since we are suspended.
3091  */
3092 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3093 {
3094         mutex_lock(&mgr->lock);
3095         drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3096                            DP_MST_EN | DP_UPSTREAM_IS_SRC);
3097         mutex_unlock(&mgr->lock);
3098         flush_work(&mgr->work);
3099         flush_work(&mgr->destroy_connector_work);
3100 }
3101 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3102
3103 /**
3104  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3105  * @mgr: manager to resume
3106  *
 * This will fetch the DPCD and check whether the device is still there;
 * if it is, it will rewrite the MSTM control bits and return 0.
 *
 * If the device is gone, this returns -1 and the driver should do
 * a full MST reprobe, in case we were undocked.
3112  */
3113 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
3114 {
3115         int ret = 0;
3116
3117         mutex_lock(&mgr->lock);
3118
3119         if (mgr->mst_primary) {
3120                 int sret;
3121                 u8 guid[16];
3122
3123                 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3124                 if (sret != DP_RECEIVER_CAP_SIZE) {
3125                         DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3126                         ret = -1;
3127                         goto out_unlock;
3128                 }
3129
3130                 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3131                                          DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3132                 if (ret < 0) {
3133                         DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3134                         ret = -1;
3135                         goto out_unlock;
3136                 }
3137
3138                 /* Some hubs forget their guids after they resume */
3139                 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3140                 if (sret != 16) {
3141                         DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3142                         ret = -1;
3143                         goto out_unlock;
3144                 }
3145                 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3146
3147                 ret = 0;
	} else {
		ret = -1;
	}
3150
3151 out_unlock:
3152         mutex_unlock(&mgr->lock);
3153         return ret;
3154 }
3155 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
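
/*
 * Illustrative sketch of the resume handling described above: if the
 * topology didn't survive suspend, tear MST down so a later detect cycle
 * can reprobe from scratch. The function name is hypothetical.
 */
static void __maybe_unused
example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0)
		/* undocked during suspend: drop the stale topology */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
}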
3156
3157 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3158 {
3159         int len;
3160         u8 replyblock[32];
3161         int replylen, origlen, curreply;
3162         int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
			   DP_SIDEBAND_MSG_DOWN_REP_BASE;

	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3166
3167         len = min(mgr->max_dpcd_transaction_bytes, 16);
3168         ret = drm_dp_dpcd_read(mgr->aux, basereg,
3169                                replyblock, len);
3170         if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD %s msg: %d %d\n",
			      up ? "up req" : "down rep", len, ret);
3172                 return false;
3173         }
3174         ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3175         if (!ret) {
3176                 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3177                 return false;
3178         }
3179         replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3180
3181         origlen = replylen;
3182         replylen -= len;
3183         curreply = len;
3184         while (replylen > 0) {
3185                 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3186                 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3187                                     replyblock, len);
3188                 if (ret != len) {
3189                         DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3190                                       len, ret);
3191                         return false;
3192                 }
3193
3194                 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3195                 if (!ret) {
3196                         DRM_DEBUG_KMS("failed to build sideband msg\n");
3197                         return false;
3198                 }
3199
3200                 curreply += len;
3201                 replylen -= len;
3202         }
3203         return true;
3204 }
3205
3206 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3207 {
3208         struct drm_dp_sideband_msg_tx *txmsg;
3209         struct drm_dp_mst_branch *mstb;
3210         struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3211         int slot = -1;
3212
3213         if (!drm_dp_get_one_sb_msg(mgr, false))
3214                 goto clear_down_rep_recv;
3215
3216         if (!mgr->down_rep_recv.have_eomt)
3217                 return 0;
3218
3219         mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3220         if (!mstb) {
3221                 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3222                               hdr->lct);
3223                 goto clear_down_rep_recv;
3224         }
3225
3226         /* find the message */
3227         slot = hdr->seqno;
3228         mutex_lock(&mgr->qlock);
3229         txmsg = mstb->tx_slots[slot];
	mutex_unlock(&mgr->qlock);
3232
3233         if (!txmsg) {
3234                 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3235                               mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3236                               mgr->down_rep_recv.msg[0]);
3237                 goto no_msg;
3238         }
3239
3240         drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3241
3242         if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3243                 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3244                               txmsg->reply.req_type,
3245                               drm_dp_mst_req_type_str(txmsg->reply.req_type),
3246                               txmsg->reply.u.nak.reason,
3247                               drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3248                               txmsg->reply.u.nak.nak_data);
3249
	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

	mutex_lock(&mgr->qlock);
	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
	mstb->tx_slots[slot] = NULL;
	mutex_unlock(&mgr->qlock);

	/* Only drop the mstb reference once we're done touching tx_slots */
	drm_dp_mst_topology_put_mstb(mstb);
3257
3258         wake_up_all(&mgr->tx_waitq);
3259
3260         return 0;
3261
3262 no_msg:
3263         drm_dp_mst_topology_put_mstb(mstb);
3264 clear_down_rep_recv:
3265         memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3266
3267         return 0;
3268 }
3269
3270 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3271 {
3272         struct drm_dp_sideband_msg_req_body msg;
3273         struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3274         struct drm_dp_mst_branch *mstb = NULL;
3275         const u8 *guid;
3276         bool seqno;
3277
3278         if (!drm_dp_get_one_sb_msg(mgr, true))
3279                 goto out;
3280
3281         if (!mgr->up_req_recv.have_eomt)
3282                 return 0;
3283
3284         if (!hdr->broadcast) {
3285                 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3286                 if (!mstb) {
			DRM_DEBUG_KMS("Got MST up req from unknown device %d\n",
				      hdr->lct);
3289                         goto out;
3290                 }
3291         }
3292
3293         seqno = hdr->seqno;
3294         drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
3295
3296         if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY)
3297                 guid = msg.u.conn_stat.guid;
3298         else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY)
3299                 guid = msg.u.resource_stat.guid;
3300         else
3301                 goto out;
3302
3303         drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno,
3304                                  false);
3305
3306         if (!mstb) {
3307                 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3308                 if (!mstb) {
			DRM_DEBUG_KMS("Got MST up req from unknown device (GUID lookup failed), lct %d\n",
				      hdr->lct);
3311                         goto out;
3312                 }
3313         }
3314
3315         if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3316                 drm_dp_update_port(mstb, &msg.u.conn_stat);
3317
3318                 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3319                               msg.u.conn_stat.port_number,
3320                               msg.u.conn_stat.legacy_device_plug_status,
3321                               msg.u.conn_stat.displayport_device_plug_status,
3322                               msg.u.conn_stat.message_capability_status,
3323                               msg.u.conn_stat.input_port,
3324                               msg.u.conn_stat.peer_device_type);
3325
3326                 drm_kms_helper_hotplug_event(mgr->dev);
3327         } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3328                 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3329                               msg.u.resource_stat.port_number,
3330                               msg.u.resource_stat.available_pbn);
3331         }
3332
3333         drm_dp_mst_topology_put_mstb(mstb);
3334 out:
3335         memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3336         return 0;
3337 }
3338
3339 /**
3340  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3341  * @mgr: manager to notify irq for.
3342  * @esi: 4 bytes from SINK_COUNT_ESI
3343  * @handled: whether the hpd interrupt was consumed or not
3344  *
3345  * This should be called from the driver when it detects a short IRQ,
3346  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3347  * topology manager will process the sideband messages received as a result
3348  * of this.
3349  */
3350 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3351 {
3352         int ret = 0;
3353         int sc;

	*handled = false;
3355         sc = esi[0] & 0x3f;
3356
3357         if (sc != mgr->sink_count) {
3358                 mgr->sink_count = sc;
3359                 *handled = true;
3360         }
3361
3362         if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3363                 ret = drm_dp_mst_handle_down_rep(mgr);
3364                 *handled = true;
3365         }
3366
3367         if (esi[1] & DP_UP_REQ_MSG_RDY) {
3368                 ret |= drm_dp_mst_handle_up_req(mgr);
3369                 *handled = true;
3370         }
3371
3372         drm_dp_mst_kick_tx(mgr);
3373         return ret;
3374 }
3375 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
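
/*
 * Illustrative sketch of the short-pulse handler described above: read the
 * four ESI bytes, pass them to the topology manager, then ack the serviced
 * events so the sink can raise new ones. Error handling is elided and the
 * function name is hypothetical.
 */
static void __maybe_unused
example_mst_short_hpd(struct drm_dp_mst_topology_mgr *mgr)
{
	bool handled = false;
	u8 esi[4] = {};

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled)
		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}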
3376
3377 /**
3378  * drm_dp_mst_detect_port() - get connection status for an MST port
3379  * @connector: DRM connector for this port
3380  * @mgr: manager for this port
3381  * @port: unverified pointer to a port
3382  *
 * This returns the current connection state for a port. It validates that
 * the port pointer still exists so the caller doesn't require a reference.
3385  */
3386 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
3387                                                  struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3388 {
3389         enum drm_connector_status status = connector_status_disconnected;
3390
3391         /* we need to search for the port in the mgr in case it's gone */
3392         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3393         if (!port)
3394                 return connector_status_disconnected;
3395
3396         if (!port->ddps)
3397                 goto out;
3398
3399         switch (port->pdt) {
3400         case DP_PEER_DEVICE_NONE:
3401         case DP_PEER_DEVICE_MST_BRANCHING:
3402                 break;
3403
3404         case DP_PEER_DEVICE_SST_SINK:
3405                 status = connector_status_connected;
3406                 /* for logical ports - cache the EDID */
		if (port->port_num >= DP_MST_LOGICAL_PORT_0 &&
		    !port->cached_edid)
			port->cached_edid = drm_get_edid(connector,
							 &port->aux.ddc);
3410                 break;
3411         case DP_PEER_DEVICE_DP_LEGACY_CONV:
3412                 if (port->ldps)
3413                         status = connector_status_connected;
3414                 break;
3415         }
3416 out:
3417         drm_dp_mst_topology_put_port(port);
3418         return status;
3419 }
3420 EXPORT_SYMBOL(drm_dp_mst_detect_port);
3421
3422 /**
3423  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3424  * @mgr: manager for this port
3425  * @port: unverified pointer to a port.
3426  *
3427  * This returns whether the port supports audio or not.
3428  */
3429 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3430                                         struct drm_dp_mst_port *port)
3431 {
3432         bool ret = false;
3433
3434         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3435         if (!port)
3436                 return ret;
3437         ret = port->has_audio;
3438         drm_dp_mst_topology_put_port(port);
3439         return ret;
3440 }
3441 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3442
3443 /**
3444  * drm_dp_mst_get_edid() - get EDID for an MST port
3445  * @connector: toplevel connector to get EDID for
3446  * @mgr: manager for this port
3447  * @port: unverified pointer to a port.
3448  *
 * This returns an EDID for the port connected to a connector.
 * It validates that the pointer still exists so the caller doesn't
 * require a reference.
3452  */
3453 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3454 {
3455         struct edid *edid = NULL;
3456
3457         /* we need to search for the port in the mgr in case it's gone */
3458         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3459         if (!port)
3460                 return NULL;
3461
	if (port->cached_edid)
		edid = drm_edid_duplicate(port->cached_edid);
	else
		edid = drm_get_edid(connector, &port->aux.ddc);
3467         port->has_audio = drm_detect_monitor_audio(edid);
3468         drm_dp_mst_topology_put_port(port);
3469         return edid;
3470 }
3471 EXPORT_SYMBOL(drm_dp_mst_get_edid);
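
/*
 * Illustrative sketch of a connector ->get_modes() built on the helper
 * above; note the caller owns the returned EDID and must free it. The
 * function name is hypothetical.
 */
static int __maybe_unused
example_mst_get_modes(struct drm_connector *connector,
		      struct drm_dp_mst_topology_mgr *mgr,
		      struct drm_dp_mst_port *port)
{
	struct edid *edid;
	int count;

	edid = drm_dp_mst_get_edid(connector, mgr, port);
	drm_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}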
3472
3473 /**
3474  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3475  * @mgr: manager to use
3476  * @pbn: payload bandwidth to convert into slots.
3477  *
3478  * Calculate the number of VCPI slots that will be required for the given PBN
3479  * value. This function is deprecated, and should not be used in atomic
3480  * drivers.
3481  *
3482  * RETURNS:
3483  * The total slots required for this port, or error.
3484  */
3485 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3486                            int pbn)
3487 {
3488         int num_slots;
3489
3490         num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3491
3492         /* max. time slots - one slot for MTP header */
3493         if (num_slots > 63)
3494                 return -ENOSPC;
3495         return num_slots;
3496 }
3497 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
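
/*
 * Worked example (informative): with pbn_div = 40 (HBR2 x4), a stream
 * needing 2520 PBN maps to DIV_ROUND_UP(2520, 40) = 63 slots - exactly
 * the maximum - while 2521 PBN would need 64 slots and returns -ENOSPC.
 */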
3498
3499 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3500                             struct drm_dp_vcpi *vcpi, int pbn, int slots)
3501 {
3502         int ret;
3503
3504         /* max. time slots - one slot for MTP header */
3505         if (slots > 63)
3506                 return -ENOSPC;
3507
3508         vcpi->pbn = pbn;
3509         vcpi->aligned_pbn = slots * mgr->pbn_div;
3510         vcpi->num_slots = slots;
3511
3512         ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3513         if (ret < 0)
3514                 return ret;
3515         return 0;
3516 }
3517
3518 /**
3519  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3520  * @state: global atomic state
3521  * @mgr: MST topology manager for the port
3522  * @port: port to find vcpi slots for
3523  * @pbn: bandwidth required for the mode in PBN
3524  *
3525  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3526  * may have had. Any atomic drivers which support MST must call this function
3527  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3528  * current VCPI allocation for the new state, but only when
3529  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3530  * to ensure compatibility with userspace applications that still use the
3531  * legacy modesetting UAPI.
3532  *
3533  * Allocations set by this function are not checked against the bandwidth
3534  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3535  *
3536  * Additionally, it is OK to call this function multiple times on the same
3537  * @port as needed. It is not OK however, to call this function and
3538  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3539  *
3540  * See also:
3541  * drm_dp_atomic_release_vcpi_slots()
3542  * drm_dp_mst_atomic_check()
3543  *
3544  * Returns:
3545  * Total slots in the atomic state assigned for this port, or a negative error
3546  * code if the port no longer exists
3547  */
3548 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3549                                   struct drm_dp_mst_topology_mgr *mgr,
3550                                   struct drm_dp_mst_port *port, int pbn)
3551 {
3552         struct drm_dp_mst_topology_state *topology_state;
3553         struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
	int prev_slots, req_slots;
3555
3556         topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3557         if (IS_ERR(topology_state))
3558                 return PTR_ERR(topology_state);
3559
3560         /* Find the current allocation for this port, if any */
3561         list_for_each_entry(pos, &topology_state->vcpis, next) {
3562                 if (pos->port == port) {
3563                         vcpi = pos;
3564                         prev_slots = vcpi->vcpi;
3565
3566                         /*
3567                          * This should never happen, unless the driver tries
3568                          * releasing and allocating the same VCPI allocation,
3569                          * which is an error
3570                          */
3571                         if (WARN_ON(!prev_slots)) {
3572                                 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3573                                           port);
3574                                 return -EINVAL;
3575                         }
3576
3577                         break;
3578                 }
3579         }
3580         if (!vcpi)
3581                 prev_slots = 0;
3582
3583         req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3584
3585         DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3586                          port->connector->base.id, port->connector->name,
3587                          port, prev_slots, req_slots);
3588
3589         /* Add the new allocation to the state */
3590         if (!vcpi) {
3591                 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3592                 if (!vcpi)
3593                         return -ENOMEM;
3594
3595                 drm_dp_mst_get_port_malloc(port);
3596                 vcpi->port = port;
3597                 list_add(&vcpi->next, &topology_state->vcpis);
3598         }
3599         vcpi->vcpi = req_slots;
3600
	return req_slots;
3603 }
3604 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
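
/*
 * A minimal sketch of the &drm_encoder_helper_funcs.atomic_check usage
 * described above. The mgr/port lookup is driver specific, so both are
 * taken as parameters here; the 24 bpp figure and the function name are
 * merely examples.
 */
static int __maybe_unused
example_mst_encoder_atomic_check(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 struct drm_crtc_state *crtc_state)
{
	int pbn, slots;

	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn);
	if (slots < 0)
		return slots;

	return 0;
}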
3605
3606 /**
3607  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3608  * @state: global atomic state
3609  * @mgr: MST topology manager for the port
3610  * @port: The port to release the VCPI slots from
3611  *
3612  * Releases any VCPI slots that have been allocated to a port in the atomic
3613  * state. Any atomic drivers which support MST must call this function in
3614  * their &drm_connector_helper_funcs.atomic_check() callback when the
3615  * connector will no longer have VCPI allocated (e.g. because its CRTC was
3616  * removed) when it had VCPI allocated in the previous atomic state.
3617  *
3618  * It is OK to call this even if @port has been removed from the system.
3619  * Additionally, it is OK to call this function multiple times on the same
3620  * @port as needed. It is not OK however, to call this function and
3621  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
3622  * phase.
3623  *
3624  * See also:
3625  * drm_dp_atomic_find_vcpi_slots()
3626  * drm_dp_mst_atomic_check()
3627  *
3628  * Returns:
3629  * 0 if all slots for this port were added back to
3630  * &drm_dp_mst_topology_state.avail_slots or negative error code
3631  */
3632 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3633                                      struct drm_dp_mst_topology_mgr *mgr,
3634                                      struct drm_dp_mst_port *port)
3635 {
3636         struct drm_dp_mst_topology_state *topology_state;
3637         struct drm_dp_vcpi_allocation *pos;
3638         bool found = false;
3639
3640         topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3641         if (IS_ERR(topology_state))
3642                 return PTR_ERR(topology_state);
3643
3644         list_for_each_entry(pos, &topology_state->vcpis, next) {
3645                 if (pos->port == port) {
3646                         found = true;
3647                         break;
3648                 }
3649         }
3650         if (WARN_ON(!found)) {
3651                 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3652                           port, &topology_state->base);
3653                 return -EINVAL;
3654         }
3655
3656         DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3657         if (pos->vcpi) {
3658                 drm_dp_mst_put_port_malloc(port);
3659                 pos->vcpi = 0;
3660         }
3661
3662         return 0;
3663 }
3664 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
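
/*
 * A minimal sketch of the release side described above, called from a
 * connector's atomic_check when the connector loses its CRTC; names are
 * hypothetical.
 */
static int __maybe_unused
example_mst_connector_atomic_check(struct drm_atomic_state *state,
				   struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   struct drm_connector_state *old_conn_state,
				   struct drm_connector_state *new_conn_state)
{
	if (old_conn_state->crtc && !new_conn_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);

	return 0;
}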
3665
3666 /**
3667  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3668  * @mgr: manager for this port
3669  * @port: port to allocate a virtual channel for.
3670  * @pbn: payload bandwidth number to request
3671  * @slots: returned number of slots for this PBN.
3672  */
3673 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3674                               struct drm_dp_mst_port *port, int pbn, int slots)
3675 {
3676         int ret;
3677
3678         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3679         if (!port)
3680                 return false;
3681
	if (slots < 0)
		goto out;
3684
3685         if (port->vcpi.vcpi > 0) {
3686                 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3687                               port->vcpi.vcpi, port->vcpi.pbn, pbn);
3688                 if (pbn == port->vcpi.pbn) {
3689                         drm_dp_mst_topology_put_port(port);
3690                         return true;
3691                 }
3692         }
3693
3694         ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3695         if (ret) {
3696                 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3697                               DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3698                 goto out;
3699         }
3700         DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3701                       pbn, port->vcpi.num_slots);
3702
3703         /* Keep port allocated until its payload has been removed */
3704         drm_dp_mst_get_port_malloc(port);
3705         drm_dp_mst_topology_put_port(port);
3706         return true;
out:
	/* error paths must drop the reference taken by the validation above */
	drm_dp_mst_topology_put_port(port);
	return false;
3709 }
3710 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
3711
3712 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3713 {
3714         int slots = 0;
3715         port = drm_dp_mst_topology_get_port_validated(mgr, port);
3716         if (!port)
3717                 return slots;
3718
3719         slots = port->vcpi.num_slots;
3720         drm_dp_mst_topology_put_port(port);
3721         return slots;
3722 }
3723 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3724
3725 /**
3726  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3727  * @mgr: manager for this port
3728  * @port: unverified pointer to a port.
3729  *
 * This just resets the number of slots for the port's VCPI for later programming.
3731  */
3732 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3733 {
3734         /*
3735          * A port with VCPI will remain allocated until its VCPI is
3736          * released, no verified ref needed
3737          */
3738
3739         port->vcpi.num_slots = 0;
3740 }
3741 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3742
3743 /**
3744  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3745  * @mgr: manager for this port
3746  * @port: port to deallocate vcpi for
3747  *
3748  * This can be called unconditionally, regardless of whether
3749  * drm_dp_mst_allocate_vcpi() succeeded or not.
3750  */
3751 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3752                                 struct drm_dp_mst_port *port)
3753 {
3754         if (!port->vcpi.vcpi)
3755                 return;
3756
3757         drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3758         port->vcpi.num_slots = 0;
3759         port->vcpi.pbn = 0;
3760         port->vcpi.aligned_pbn = 0;
3761         port->vcpi.vcpi = 0;
3762         drm_dp_mst_put_port_malloc(port);
3763 }
3764 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
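
/*
 * Informative sketch of the legacy (non-atomic) VCPI lifecycle the
 * helpers above implement:
 *
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	... enable the stream and run the two-step payload update ...
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	... run the payload update again to remove the payload ...
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */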
3765
3766 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3767                                      int id, struct drm_dp_payload *payload)
3768 {
3769         u8 payload_alloc[3], status;
3770         int ret;
3771         int retries = 0;
3772
3773         drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3774                            DP_PAYLOAD_TABLE_UPDATED);
3775
3776         payload_alloc[0] = id;
3777         payload_alloc[1] = payload->start_slot;
3778         payload_alloc[2] = payload->num_slots;
3779
3780         ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3781         if (ret != 3) {
3782                 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3783                 goto fail;
3784         }
3785
3786 retry:
3787         ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3788         if (ret < 0) {
3789                 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3790                 goto fail;
3791         }
3792
3793         if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3794                 retries++;
3795                 if (retries < 20) {
3796                         usleep_range(10000, 20000);
3797                         goto retry;
3798                 }
3799                 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3800                 ret = -EINVAL;
3801                 goto fail;
3802         }
3803         ret = 0;
3804 fail:
3805         return ret;
3806 }
3807
3808
3809 /**
3810  * drm_dp_check_act_status() - Check ACT handled status.
3811  * @mgr: manager to use
3812  *
 * Check the payload status bits in the DPCD for ACT handled completion.
 *
 * RETURNS: 0 once the sink sets ACT_HANDLED, negative error code on
 * DPCD read failure or timeout.
 */
3815 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3816 {
3817         u8 status;
3818         int ret;
3819         int count = 0;
3820
3821         do {
3822                 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3823
3824                 if (ret < 0) {
3825                         DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3826                         goto fail;
3827                 }
3828
3829                 if (status & DP_PAYLOAD_ACT_HANDLED)
3830                         break;
3831                 count++;
3832                 udelay(100);
3833
3834         } while (count < 30);
3835
3836         if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
3837                 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
3838                 ret = -EINVAL;
3839                 goto fail;
3840         }
3841         return 0;
3842 fail:
3843         return ret;
3844 }
3845 EXPORT_SYMBOL(drm_dp_check_act_status);
3846
3847 /**
3848  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3849  * @clock: dot clock for the mode
3850  * @bpp: bpp for the mode.
3851  *
3852  * This uses the formula in the spec to calculate the PBN value for a mode.
3853  */
3854 int drm_dp_calc_pbn_mode(int clock, int bpp)
3855 {
3856         u64 kbps;
3857         s64 peak_kbps;
3858         u32 numerator;
3859         u32 denominator;
3860
3861         kbps = clock * bpp;
3862
	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
	 * a common multiplier to render an integer PBN for all link rate/lane
	 * count combinations.
	 *
	 * Calculate:
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
	 */
3873
3874         numerator = 64 * 1006;
3875         denominator = 54 * 8 * 1000 * 1000;
3876
3877         kbps *= numerator;
3878         peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3879
3880         return drm_fixp2int_ceil(peak_kbps);
3881 }
3882 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
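
/*
 * Worked example (informative): a 1080p60 mode (148500 kHz dot clock) at
 * 24 bpp gives kbps = 3564000; after the 1006/1000 margin and the
 * 64/(54*8) unit conversion, drm_dp_calc_pbn_mode(148500, 24) returns
 * ceil(531.17) = 532 PBN.
 */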
3883
3884 /* we want to kick the TX after we've ack the up/down IRQs. */
3885 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3886 {
3887         queue_work(system_long_wq, &mgr->tx_work);
3888 }
3889
3890 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3891                                  struct drm_dp_mst_branch *mstb)
3892 {
3893         struct drm_dp_mst_port *port;
3894         int tabs = mstb->lct;
3895         char prefix[10];
3896         int i;
3897
3898         for (i = 0; i < tabs; i++)
3899                 prefix[i] = '\t';
3900         prefix[i] = '\0';
3901
3902         seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3903         list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n",
			   prefix, port->port_num, port->input, port->pdt,
			   port->ddps, port->ldps, port->num_sdp_streams,
			   port->num_sdp_stream_sinks, port, port->connector);
3905                 if (port->mstb)
3906                         drm_dp_mst_dump_mstb(m, port->mstb);
3907         }
3908 }
3909
3910 #define DP_PAYLOAD_TABLE_SIZE           64
3911
3912 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3913                                   char *buf)
3914 {
3915         int i;
3916
3917         for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3918                 if (drm_dp_dpcd_read(mgr->aux,
3919                                      DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3920                                      &buf[i], 16) != 16)
3921                         return false;
3922         }
3923         return true;
3924 }
3925
3926 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3927                                struct drm_dp_mst_port *port, char *name,
3928                                int namelen)
3929 {
3930         struct edid *mst_edid;
3931
	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
	drm_edid_get_monitor_name(mst_edid, name, namelen);
	/* drm_dp_mst_get_edid() hands us a duplicate that we must free */
	kfree(mst_edid);
3934 }
3935
3936 /**
 * drm_dp_mst_dump_topology() - dump topology to seq file.
3938  * @m: seq_file to dump output to
3939  * @mgr: manager to dump current topology for.
3940  *
 * Helper to dump the MST topology to a seq file for debugfs.
3942  */
3943 void drm_dp_mst_dump_topology(struct seq_file *m,
3944                               struct drm_dp_mst_topology_mgr *mgr)
3945 {
3946         int i;
3947         struct drm_dp_mst_port *port;
3948
3949         mutex_lock(&mgr->lock);
3950         if (mgr->mst_primary)
3951                 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3952
3953         /* dump VCPIs */
3954         mutex_unlock(&mgr->lock);
3955
3956         mutex_lock(&mgr->payload_lock);
3957         seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
3958                 mgr->max_payloads);
3959
3960         for (i = 0; i < mgr->max_payloads; i++) {
3961                 if (mgr->proposed_vcpis[i]) {
3962                         char name[14];
3963
3964                         port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3965                         fetch_monitor_name(mgr, port, name, sizeof(name));
3966                         seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
3967                                    port->port_num, port->vcpi.vcpi,
3968                                    port->vcpi.num_slots,
3969                                    (*name != 0) ? name :  "Unknown");
		} else {
			seq_printf(m, "vcpi %d: unused\n", i);
		}
3972         }
3973         for (i = 0; i < mgr->max_payloads; i++) {
3974                 seq_printf(m, "payload %d: %d, %d, %d\n",
3975                            i,
3976                            mgr->payloads[i].payload_state,
3977                            mgr->payloads[i].start_slot,
3978                            mgr->payloads[i].num_slots);
	}
3982         mutex_unlock(&mgr->payload_lock);
3983
3984         mutex_lock(&mgr->lock);
3985         if (mgr->mst_primary) {
3986                 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3987                 int ret;
3988
3989                 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
3990                 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
3991                 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
3992                 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
3993                 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
3994                 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
3995
3996                 /* dump the standard OUI branch header */
3997                 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
3998                 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
3999                 for (i = 0x3; i < 0x8 && buf[i]; i++)
4000                         seq_printf(m, "%c", buf[i]);
4001                 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4002                            buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4003                 if (dump_dp_payload_table(mgr, buf))
4004                         seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4005         }
4006
4007         mutex_unlock(&mgr->lock);
4008
}
4011
4012 static void drm_dp_tx_work(struct work_struct *work)
4013 {
4014         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4015
4016         mutex_lock(&mgr->qlock);
4017         if (!list_empty(&mgr->tx_msg_downq))
4018                 process_single_down_tx_qlock(mgr);
4019         mutex_unlock(&mgr->qlock);
4020 }
4021
4022 static void drm_dp_destroy_connector_work(struct work_struct *work)
4023 {
4024         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
4025         struct drm_dp_mst_port *port;
4026         bool send_hotplug = false;
4027         /*
4028          * Not a regular list traverse as we have to drop the destroy
4029          * connector lock before destroying the connector, to avoid AB->BA
4030          * ordering between this lock and the config mutex.
4031          */
4032         for (;;) {
4033                 mutex_lock(&mgr->destroy_connector_lock);
4034                 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
4035                 if (!port) {
4036                         mutex_unlock(&mgr->destroy_connector_lock);
4037                         break;
4038                 }
4039                 list_del(&port->next);
4040                 mutex_unlock(&mgr->destroy_connector_lock);
4041
4042                 mgr->cbs->destroy_connector(mgr, port->connector);
4043
4044                 drm_dp_port_teardown_pdt(port, port->pdt);
4045                 port->pdt = DP_PEER_DEVICE_NONE;
4046
4047                 drm_dp_mst_put_port_malloc(port);
4048                 send_hotplug = true;
4049         }
4050         if (send_hotplug)
4051                 drm_kms_helper_hotplug_event(mgr->dev);
4052 }
4053
4054 static struct drm_private_state *
4055 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4056 {
4057         struct drm_dp_mst_topology_state *state, *old_state =
4058                 to_dp_mst_topology_state(obj->state);
4059         struct drm_dp_vcpi_allocation *pos, *vcpi;
4060
4061         state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4062         if (!state)
4063                 return NULL;
4064
4065         __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4066
4067         INIT_LIST_HEAD(&state->vcpis);
4068
4069         list_for_each_entry(pos, &old_state->vcpis, next) {
4070                 /* Prune leftover freed VCPI allocations */
4071                 if (!pos->vcpi)
4072                         continue;
4073
4074                 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4075                 if (!vcpi)
4076                         goto fail;
4077
4078                 drm_dp_mst_get_port_malloc(vcpi->port);
4079                 list_add(&vcpi->next, &state->vcpis);
4080         }
4081
4082         return &state->base;
4083
4084 fail:
4085         list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4086                 drm_dp_mst_put_port_malloc(pos->port);
4087                 kfree(pos);
4088         }
4089         kfree(state);
4090
4091         return NULL;
4092 }
4093
4094 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4095                                      struct drm_private_state *state)
4096 {
4097         struct drm_dp_mst_topology_state *mst_state =
4098                 to_dp_mst_topology_state(state);
4099         struct drm_dp_vcpi_allocation *pos, *tmp;
4100
4101         list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4102                 /* We only keep references to ports with non-zero VCPIs */
4103                 if (pos->vcpi)
4104                         drm_dp_mst_put_port_malloc(pos->port);
4105                 kfree(pos);
4106         }
4107
4108         kfree(mst_state);
4109 }
4110
4111 static inline int
4112 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
4113                                        struct drm_dp_mst_topology_state *mst_state)
4114 {
4115         struct drm_dp_vcpi_allocation *vcpi;
4116         int avail_slots = 63, payload_count = 0;
4117
4118         list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4119                 /* Releasing VCPI is always OK-even if the port is gone */
4120                 if (!vcpi->vcpi) {
4121                         DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4122                                          vcpi->port);
4123                         continue;
4124                 }
4125
4126                 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4127                                  vcpi->port, vcpi->vcpi);
4128
4129                 avail_slots -= vcpi->vcpi;
4130                 if (avail_slots < 0) {
4131                         DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4132                                          vcpi->port, mst_state,
4133                                          avail_slots + vcpi->vcpi);
4134                         return -ENOSPC;
4135                 }
4136
4137                 if (++payload_count > mgr->max_payloads) {
4138                         DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4139                                          mgr, mst_state, mgr->max_payloads);
4140                         return -EINVAL;
4141                 }
4142         }
4143         DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4144                          mgr, mst_state, avail_slots,
4145                          63 - avail_slots);
4146
4147         return 0;
4148 }
4149
4150 /**
4151  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
4152  * atomic update is valid
4153  * @state: Pointer to the new &struct drm_dp_mst_topology_state
4154  *
4155  * Checks the given topology state for an atomic update to ensure that it's
4156  * valid. This includes checking whether there's enough bandwidth to support
4157  * the new VCPI allocations in the atomic update.
4158  *
4159  * Any atomic drivers supporting DP MST must make sure to call this after
4160  * checking the rest of their state in their
4161  * &drm_mode_config_funcs.atomic_check() callback.
4162  *
4163  * See also:
4164  * drm_dp_atomic_find_vcpi_slots()
4165  * drm_dp_atomic_release_vcpi_slots()
4166  *
4167  * Returns:
4168  *
4169  * 0 if the new state is valid, negative error code otherwise.
4170  */
4171 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
4172 {
4173         struct drm_dp_mst_topology_mgr *mgr;
4174         struct drm_dp_mst_topology_state *mst_state;
4175         int i, ret = 0;
4176
4177         for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4178                 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
4179                 if (ret)
4180                         break;
4181         }
4182
4183         return ret;
4184 }
4185 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
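
/*
 * A minimal sketch of chaining the MST check into a driver's
 * &drm_mode_config_funcs.atomic_check, as described above; the function
 * name is hypothetical.
 */
static int __maybe_unused
example_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}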
4186
4187 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
4188         .atomic_duplicate_state = drm_dp_mst_duplicate_state,
4189         .atomic_destroy_state = drm_dp_mst_destroy_state,
4190 };
4191 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
4192
4193 /**
 * drm_atomic_get_mst_topology_state() - get MST topology state
4195  *
4196  * @state: global atomic state
4197  * @mgr: MST topology manager, also the private object in this case
4198  *
 * This function wraps drm_atomic_get_private_obj_state() passing in the MST atomic
 * state vtable so that the private object state returned is that of a MST
 * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
 * to take care of the locking, so we warn if the connection_mutex is not held.
4203  *
4204  * RETURNS:
4205  *
4206  * The MST topology state or error pointer.
4207  */
struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_device *dev = mgr->dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
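
/*
 * A minimal usage sketch, assuming the caller is in an atomic check path
 * that already holds connection_mutex; the state and mgr variables are
 * whatever the driver has at hand, shown only for illustration:
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 */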
4217
/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	struct drm_dp_mst_topology_state *mst_state;

	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	mutex_init(&mgr->destroy_connector_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_LIST_HEAD(&mgr->destroy_connector_list);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
		return -EINVAL;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		goto err_free_payloads;
	set_bit(0, &mgr->payload_mask);

	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
	if (mst_state == NULL)
		goto err_free_vcpis;

	mst_state->mgr = mgr;
	INIT_LIST_HEAD(&mst_state->vcpis);

	drm_atomic_private_obj_init(dev, &mgr->base,
				    &mst_state->base,
				    &drm_dp_mst_topology_state_funcs);

	return 0;

	/* Don't leak the earlier allocations if a later one fails. */
err_free_vcpis:
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
err_free_payloads:
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
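
/*
 * A minimal call sketch, assuming a driver-private structure that embeds
 * the manager and an AUX channel; the limits (16-byte DPCD transactions,
 * 6 payloads) and all names here are illustrative, not prescribed values:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, drm_dev,
 *					   &priv->dp_aux, 16, 6,
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */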
4276
/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	flush_work(&mgr->work);
	flush_work(&mgr->destroy_connector_work);
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
	drm_atomic_private_obj_fini(&mgr->base);
	mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
4298
static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
{
	int i;

	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
		return false;

	for (i = 0; i < num - 1; i++) {
		if (msgs[i].flags & I2C_M_RD ||
		    msgs[i].len > 0xff)
			return false;
	}

	return msgs[num - 1].flags & I2C_M_RD &&
		msgs[num - 1].len <= 0xff;
}
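
/*
 * For reference, the transfer shape accepted above is up to
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS address-setting writes followed by
 * exactly one read. A classic EDID fetch is a minimal sketch of a passing
 * msgs[] array (the addresses and lengths are the usual DDC values, shown
 * only for illustration):
 *
 *	u8 offset = 0;
 *	u8 edid[EDID_LENGTH];
 *	struct i2c_msg msgs[] = {
 *		{ .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = DDC_ADDR, .flags = I2C_M_RD,
 *		  .len = sizeof(edid), .buf = edid },
 *	};
 */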
4315
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	if (!remote_i2c_read_ok(msgs, num)) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	memset(&msg, 0, sizeof(msg));
	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
4382
static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
4395
/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}
4418
/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}