/*
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <drm/display/drm_dp_helper.h>
27 #include "nouveau_drv.h"
28 #include "nouveau_connector.h"
29 #include "nouveau_encoder.h"
30 #include "nouveau_crtc.h"
32 #include <nvif/if0011.h>
/* Module parameter "mst": gates DisplayPort multi-stream support.
 * Read-only after load (perms 0400), default enabled.
 */
MODULE_PARM_DESC(mst, "Enable DisplayPort multi-stream (default: enabled)");
static int nouveau_mst = 1;
module_param_named(mst, nouveau_mst, int, 0400);
/* Whether the sink reports a valid DP_SINK_COUNT (e.g. behind a branch
 * device); thin wrapper over the DRM helper using the cached DPCD and
 * device descriptor.
 */
nouveau_dp_has_sink_count(struct drm_connector *connector,
			  struct nouveau_encoder *outp)
	return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc);
/* Probe for LT-tunable PHY repeaters (LTTPRs): read the LTTPR field data
 * structure revision over native AUX.  The probe is considered successful
 * only on a full-size read with rev >= 0x14 (DP 1.4) — NOTE(review): the
 * return statements are elided in this extract; verify polarity at caller.
 */
nouveau_dp_probe_lttpr(struct nouveau_encoder *outp)
	u8 rev, size = sizeof(rev);

	/* Native AUX read of the LTTPR capability revision register. */
	ret = nvif_outp_dp_aux_xfer(&outp->outp, DP_AUX_NATIVE_READ, &size,
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
	/* Fail on transfer error, short read, or revision below DP 1.4. */
	if (ret || size < sizeof(rev) || rev < 0x14)
/* Read the sink's DPCD and derive everything needed for link training and
 * mode validation: LTTPR configuration, lane count, the set of usable link
 * rates, MST capability, sink count and downstream-port info.
 *
 * Returns connector_status_connected / connector_status_disconnected.
 */
static enum drm_connector_status
nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
		      struct nouveau_encoder *outp)
	struct drm_connector *connector = &nv_connector->base;
	struct drm_dp_aux *aux = &nv_connector->aux;
	struct nv50_mstm *mstm = NULL;
	enum drm_connector_status status = connector_status_disconnected;
	u8 *dpcd = outp->dp.dpcd;

	/* Assume no repeaters until the LTTPR probe succeeds. */
	outp->dp.lttpr.nr = 0;

	/* LTTPRs are not probed on eDP.  DPCD caps are read before the
	 * LTTPR common caps so both are valid when counting repeaters.
	 */
	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
	    nouveau_dp_probe_lttpr(outp) &&
	    !drm_dp_read_dpcd_caps(aux, dpcd) &&
	    !drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) {
		int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps);

		/* Default the repeater chain to transparent mode. */
		drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
				   DP_PHY_REPEATER_MODE_TRANSPARENT);

		/* Try non-transparent (per-repeater) link training; fall
		 * back to transparent mode if the write fails.
		 */
		ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
					 DP_PHY_REPEATER_MODE_NON_TRANSPARENT);
			drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
					   DP_PHY_REPEATER_MODE_TRANSPARENT);
		outp->dp.lttpr.nr = nr;

	/* (Re)read the receiver capability field now the repeater mode is
	 * settled.
	 */
	ret = drm_dp_read_dpcd_caps(aux, dpcd);

	/* Lane count: sink maximum, clamped to the VBIOS (DCB) limit for
	 * this output.
	 */
	outp->dp.link_nr = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
	if (outp->dcb->dpconf.link_nr < outp->dp.link_nr)
		outp->dp.link_nr = outp->dcb->dpconf.link_nr;

	/* Repeaters may support fewer lanes than the sink itself. */
	if (outp->dp.lttpr.nr) {
		int links = drm_dp_lttpr_max_lane_count(outp->dp.lttpr.caps);

		if (links && links < outp->dp.link_nr)
			outp->dp.link_nr = links;

	/* eDP 1.4+ (DPCD rev >= 0x13) publishes an explicit table of
	 * supported link rates (in 200kHz units) instead of relying on
	 * DP_MAX_LINK_RATE alone.
	 */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
		__le16 rates[DP_MAX_SUPPORTED_RATES];

		ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, rates, sizeof(rates));
		if (ret == sizeof(rates)) {
			for (int i = 0; i < ARRAY_SIZE(rates); i++) {
				/* Convert 200kHz units to the driver's 10kHz
				 * units (matches 2.7Gbps == 270000 below).
				 */
				u32 rate = (le16_to_cpu(rates[i]) * 200) / 10;

				/* Insertion sort: keep outp->dp.rate[]
				 * ordered highest rate first.
				 */
				for (j = 0; j < outp->dp.rate_nr; j++) {
					if (rate > outp->dp.rate[j].rate) {
						for (int k = outp->dp.rate_nr; k > j; k--)
							outp->dp.rate[k] = outp->dp.rate[k - 1];

				/* Remember which DPCD table index this rate
				 * came from, for DP_LINK_RATE_SET training.
				 */
				outp->dp.rate[j].dpcd = i;
				outp->dp.rate[j].rate = rate;

	/* No explicit rate table: fall back to the standard DP link rates
	 * (8.1/5.4/2.7/1.62 Gbps in 10kHz units), capped by the sink's
	 * DP_MAX_LINK_RATE (27000 * 10kHz per unit), the repeaters' maximum,
	 * and the VBIOS-configured maximum for this output.
	 */
	if (!outp->dp.rate_nr) {
		const u32 rates[] = { 810000, 540000, 270000, 162000 };
		u32 max_rate = dpcd[DP_MAX_LINK_RATE] * 27000;

		if (outp->dp.lttpr.nr) {
			int rate = drm_dp_lttpr_max_link_rate(outp->dp.lttpr.caps);

			if (rate && rate < max_rate)

		max_rate = min_t(int, max_rate, outp->dcb->dpconf.link_bw);

		for (int i = 0; i < ARRAY_SIZE(rates); i++) {
			if (rates[i] <= max_rate) {
				/* dpcd == -1: not from the eDP rate table. */
				outp->dp.rate[outp->dp.rate_nr].dpcd = -1;
				outp->dp.rate[outp->dp.rate_nr].rate = rates[i];

	/* At least one rate must have survived the clamping above. */
	if (WARN_ON(!outp->dp.rate_nr))

	/* Tell the firmware which link rates training may use. */
	ret = nvif_outp_dp_rates(&outp->outp, outp->dp.rate, outp->dp.rate_nr);

	/* Cache the highest usable rate for mode validation. */
	for (int i = 0; i < outp->dp.rate_nr; i++) {
		u32 link_bw = outp->dp.rate[i].rate;

		if (link_bw > outp->dp.link_bw)
			outp->dp.link_bw = link_bw;

	/* Device descriptor (branch-aware) and MST capability. */
	ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd));

	mstm = outp->dp.mstm;
		mstm->can_mst = drm_dp_read_mst_cap(aux, dpcd);

	if (nouveau_dp_has_sink_count(connector, outp)) {
		ret = drm_dp_read_sink_count(aux);

		outp->dp.sink_count = ret;

		/*
		 * Dongle connected, but no display. Don't bother reading
		 * downstream port info
		 */
		if (!outp->dp.sink_count)
			return connector_status_disconnected;

	ret = drm_dp_read_downstream_info(aux, dpcd,
					  outp->dp.downstream_ports);

	status = connector_status_connected;

	if (status != connector_status_connected) {
		/* Clear any cached info */
		outp->dp.sink_count = 0;
/* Detect what is connected on a DP output and decide between SST and MST
 * operation.  Returns NOUVEAU_DP_NONE / NOUVEAU_DP_SST / NOUVEAU_DP_MST.
 * Serialised against IRQ handling via dp.hpd_irq_lock.
 */
nouveau_dp_detect(struct nouveau_connector *nv_connector,
		  struct nouveau_encoder *nv_encoder)
	struct drm_device *dev = nv_encoder->base.base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_connector *connector = &nv_connector->base;
	struct nv50_mstm *mstm = nv_encoder->dp.mstm;
	enum drm_connector_status status;
	u8 *dpcd = nv_encoder->dp.dpcd;
	int ret = NOUVEAU_DP_NONE, hpd;

	/* If we've already read the DPCD on an eDP device, we don't need to
	 * reread it as it won't change
	 */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
	    dpcd[DP_DPCD_REV] != 0)
		return NOUVEAU_DP_SST;

	mutex_lock(&nv_encoder->dp.hpd_irq_lock);

	/* If we're not ready to handle MST state changes yet, just
	 * report the last status of the connector. We'll reprobe it
	 * once we've resumed.
	 */
	if (mstm->suspended) {
			ret = NOUVEAU_DP_MST;
		else if (connector->status ==
			 connector_status_connected)
			ret = NOUVEAU_DP_SST;

	/* Physical hotplug state; power down the AUX channel when nothing
	 * is present, power it up before probing the DPCD otherwise.
	 */
	hpd = nvif_outp_detect(&nv_encoder->outp);
	if (hpd == NOT_PRESENT) {
		nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);

	nvif_outp_dp_aux_pwr(&nv_encoder->outp, true);

	status = nouveau_dp_probe_dpcd(nv_connector, nv_encoder);
	if (status == connector_status_disconnected) {
		nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);

	/* If we're in MST mode, we're done here */
	if (mstm && mstm->can_mst && mstm->is_mst) {
		ret = NOUVEAU_DP_MST;

	NV_DEBUG(drm, "sink dpcd version: 0x%02x\n", dpcd[DP_DPCD_REV]);
	for (int i = 0; i < nv_encoder->dp.rate_nr; i++)
		NV_DEBUG(drm, "sink rate %d: %d\n", i, nv_encoder->dp.rate[i].rate);

	NV_DEBUG(drm, "encoder: %dx%d\n", nv_encoder->dcb->dpconf.link_nr,
		 nv_encoder->dcb->dpconf.link_bw);
	NV_DEBUG(drm, "maximum: %dx%d\n", nv_encoder->dp.link_nr,
		 nv_encoder->dp.link_bw);

	/* Sink is MST-capable: try to bring up the MST topology, falling
	 * back to SST otherwise.
	 */
	if (mstm && mstm->can_mst) {
		ret = nv50_mstm_detect(nv_encoder);
			ret = NOUVEAU_DP_MST;
	} else if (ret != 0) {
		nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);

	ret = NOUVEAU_DP_SST;

	/* Tear down any stale MST state when we ended up in SST mode. */
	if (mstm && !mstm->suspended && ret != NOUVEAU_DP_MST)
		nv50_mstm_remove(mstm);

	mutex_unlock(&nv_encoder->dp.hpd_irq_lock);
/* Put the sink into low-power state (D3) via the DP_SET_POWER register,
 * preserving the register's non-power bits.  Best-effort: the write result
 * is not checked.  Serialised with IRQ handling via dp.hpd_irq_lock.
 */
nouveau_dp_power_down(struct nouveau_encoder *outp)
	struct drm_dp_aux *aux = &outp->conn->aux;

	mutex_lock(&outp->dp.hpd_irq_lock);

	/* Read-modify-write: only replace the power-state field. */
	ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
	pwr &= ~DP_SET_POWER_MASK;
	pwr |= DP_SET_POWER_D3;
	drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);

	mutex_unlock(&outp->dp.hpd_irq_lock);
/* Train (or retrain) the DP link via firmware, then run the optional
 * post-link-training adjustment loop when the sink requests it.
 */
nouveau_dp_train_link(struct nouveau_encoder *outp, bool retrain)
	struct drm_dp_aux *aux = &outp->conn->aux;
	bool post_lt = false;
	int ret, retries = 0;

	/* NOTE(review): 0x20 in DP_MAX_LANE_COUNT looks like the sink's
	 * post-LT-adjust-request capability bit; it is only honoured when
	 * TPS4 is not supported — confirm against the DPCD bit definitions.
	 */
	if ( (outp->dp.dpcd[DP_MAX_LANE_COUNT] & 0x20) &&
	    !(outp->dp.dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED))

	/* The actual link training is performed by firmware. */
	ret = nvif_outp_dp_train(&outp->outp, outp->dp.dpcd,

	u8 stat[DP_LINK_STATUS_SIZE];
	u8 time = 0, adjusts = 0, tmp;

	/* Post-LT adjustment: poll the DPRX PHY link status. */
	ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);

	/* Channel equalisation lost on the trained lanes. */
	if (!drm_dp_channel_eq_ok(stat, outp->dp.lt.nr)) {

	/* NOTE(review): stat[2] bit 0x02 gates the adjust loop — verify
	 * which status flag this corresponds to in the DP spec.
	 */
	if (!(stat[2] & 0x02))

	/* Wait for the sink's adjustment request to be stable across two
	 * consecutive status reads before acting on it.
	 */
	memcpy(prev, &stat[4], sizeof(prev));
	ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);

	if (!memcmp(prev, &stat[4], sizeof(prev))) {

	/* Extract the per-lane pre-emphasis and voltage-swing levels the
	 * sink is asking for.
	 */
	for (int i = 0; i < outp->dp.lt.nr; i++) {
		pe[i] = drm_dp_get_adjust_request_pre_emphasis(stat, i) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT;
		vs[i] = drm_dp_get_adjust_request_voltage(stat, i) >>
			DP_TRAIN_VOLTAGE_SWING_SHIFT;

	/* Re-drive the lanes with the requested settings. */
	ret = nvif_outp_dp_drive(&outp->outp, outp->dp.lt.nr, pe, vs);

	/* Rewrite DP_LANE_COUNT_SET (read-modify-write of the sink's lane
	 * configuration register).
	 */
	if (drm_dp_dpcd_readb(aux, DP_LANE_COUNT_SET, &tmp) == 1) {
		drm_dp_dpcd_writeb(aux, DP_LANE_COUNT_SET, tmp);

	/* Retry training a bounded number of times on failure. */
	if (ret == 1 && retries++ < 3)
/* Train the DP link for a mode.
 * @mst: train for multi-stream (uses the maximum link configuration)
 * @khz: pixel clock in kHz (SST)
 * @bpc: bits per component (SST)
 *
 * Picks a lane-count/link-rate combination whose bandwidth covers the
 * mode's requirement, waking the sink (D0) first if needed.  Serialised
 * via dp.hpd_irq_lock.
 */
nouveau_dp_train(struct nouveau_encoder *outp, bool mst, u32 khz, u8 bpc)
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct drm_dp_aux *aux = &outp->conn->aux;

	/* MST: require the maximum (all lanes at the highest rate).
	 * SST: khz * bpc * 3 components, divided by 8 bits/byte.
	 */
	min_rate = outp->dp.link_nr * outp->dp.rate[0].rate;
	min_rate = DIV_ROUND_UP(khz * bpc * 3, 8);

	NV_DEBUG(drm, "%s link training (mst:%d min_rate:%d)\n",
		 outp->base.base.name, mst, min_rate);

	mutex_lock(&outp->dp.hpd_irq_lock);

	/* Wake the sink to D0 before training if it was powered down. */
	if (drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr) == 1) {
		if ((pwr & DP_SET_POWER_MASK) != DP_SET_POWER_D0) {
			pwr &= ~DP_SET_POWER_MASK;
			pwr |= DP_SET_POWER_D0;
			drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);

	/* Walk lane counts downwards (nr, nr/2, ...) and the rate table,
	 * accepting the first combination that meets min_rate.
	 */
	for (int nr = outp->dp.link_nr; nr; nr >>= 1) {
		for (int rate = 0; rate < outp->dp.rate_nr; rate++) {
			if (outp->dp.rate[rate].rate * nr >= min_rate) {
				outp->dp.lt.bw = outp->dp.rate[rate].rate;
				outp->dp.lt.mst = mst;
				if (nouveau_dp_train_link(outp, false))

	mutex_unlock(&outp->dp.hpd_irq_lock);
/* Check the trained link's health (caller holds dp.hpd_irq_lock): read the
 * DPRX PHY link status and retrain if channel equalisation was lost.
 * Trivially succeeds when there is no trained link.
 */
nouveau_dp_link_check_locked(struct nouveau_encoder *outp)
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Nothing to check without an encoder or a trained lane count. */
	if (!outp || !outp->dp.lt.nr)

	if (drm_dp_dpcd_read_phy_link_status(&outp->conn->aux, DP_PHY_DPRX, link_status) < 0)

	/* Link still good: no retrain needed. */
	if (drm_dp_channel_eq_ok(link_status, outp->dp.lt.nr))

	/* Equalisation lost: retrain with the current parameters. */
	return nouveau_dp_train_link(outp, true);
/* Locked wrapper around nouveau_dp_link_check_locked() for the connector's
 * DP encoder.
 */
nouveau_dp_link_check(struct nouveau_connector *nv_connector)
	struct nouveau_encoder *outp = nv_connector->dp_encoder;

	mutex_lock(&outp->dp.hpd_irq_lock);
	link_ok = nouveau_dp_link_check_locked(outp);
	mutex_unlock(&outp->dp.hpd_irq_lock);
/* Work handler for DP IRQ_HPD (short HPD pulse): services the MST topology,
 * forwards CEC interrupts, and re-reads the sink count to detect displays
 * (un)plugged behind a branch device, then reports the resulting hotplug
 * event to the connector layer.
 */
nouveau_dp_irq(struct work_struct *work)
	struct nouveau_connector *nv_connector =
		container_of(work, typeof(*nv_connector), irq_work);
	struct drm_connector *connector = &nv_connector->base;
	struct nouveau_encoder *outp = find_encoder(connector, DCB_OUTPUT_DP);
	struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
	struct nv50_mstm *mstm;

	mstm = outp->dp.mstm;
	NV_DEBUG(drm, "service %s\n", connector->name);

	mutex_lock(&outp->dp.hpd_irq_lock);

	/* A failed MST service pass is treated as an unplug. */
	if (mstm && mstm->is_mst) {
		if (!nv50_mstm_service(drm, nv_connector, mstm))
			hpd |= NVIF_CONN_EVENT_V0_UNPLUG;

	drm_dp_cec_irq(&nv_connector->aux);

	/* A changed sink count behind a dongle means a downstream display
	 * was plugged or unplugged.
	 */
	if (nouveau_dp_has_sink_count(connector, outp)) {
		ret = drm_dp_read_sink_count(&nv_connector->aux);
		if (ret != outp->dp.sink_count)
			hpd |= NVIF_CONN_EVENT_V0_PLUG;

		outp->dp.sink_count = ret;

	mutex_unlock(&outp->dp.hpd_irq_lock);

	/* Report the IRQ plus any plug/unplug state change. */
	nouveau_connector_hpd(nv_connector, NVIF_CONN_EVENT_V0_IRQ | hpd);
523 * - Validate against the DP caps advertised by the GPU (we don't check these
527 nv50_dp_mode_valid(struct nouveau_encoder *outp,
528 const struct drm_display_mode *mode,
531 const unsigned int min_clock = 25000;
532 unsigned int max_rate, mode_rate, ds_max_dotclock, clock = mode->clock;
533 /* Check with the minmum bpc always, so we can advertise better modes.
534 * In particlar not doing this causes modes to be dropped on HDR
535 * displays as we might check with a bpc of 16 even.
537 const u8 bpp = 6 * 3;
539 if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
540 return MODE_NO_INTERLACE;
542 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
545 max_rate = outp->dp.link_nr * outp->dp.link_bw;
546 mode_rate = DIV_ROUND_UP(clock * bpp, 8);
547 if (mode_rate > max_rate)
548 return MODE_CLOCK_HIGH;
550 ds_max_dotclock = drm_dp_downstream_max_dotclock(outp->dp.dpcd, outp->dp.downstream_ports);
551 if (ds_max_dotclock && clock > ds_max_dotclock)
552 return MODE_CLOCK_HIGH;
554 if (clock < min_clock)
555 return MODE_CLOCK_LOW;