1 /* SPDX-License-Identifier: MIT */
3 * Copyright (C) 2017 Google, Inc.
6 * Sean Paul <seanpaul@chromium.org>
9 #include <linux/component.h>
10 #include <linux/i2c.h>
11 #include <linux/random.h>
13 #include <drm/drm_hdcp.h>
14 #include <drm/i915_component.h>
17 #include "intel_drv.h"
18 #include "intel_hdcp.h"
/* How many times to retry loading the HDCP1.4 keys before giving up. */
20 #define KEY_LOAD_TRIES 5
/* Timeout (ms) when waiting for the HW encryption-status bit to change. */
21 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
/* HDCP2.2 locality-check attempts (spec allows retrying LC_Init). */
22 #define HDCP2_LC_RETRY_CNT 3
25 bool intel_hdcp_is_ksv_valid(u8 *ksv)
28 /* KSV has 20 1's and 20 0's */
29 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
30 ones += hweight8(ksv[i]);
38 int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
39 const struct intel_hdcp_shim *shim, u8 *bksv)
41 int ret, i, tries = 2;
43 /* HDCP spec states that we must retry the bksv if it is invalid */
44 for (i = 0; i < tries; i++) {
45 ret = shim->read_bksv(intel_dig_port, bksv);
48 if (intel_hdcp_is_ksv_valid(bksv))
52 DRM_DEBUG_KMS("Bksv is invalid\n");
59 /* Is HDCP1.4 capable on Platform and Sink */
60 bool intel_hdcp_capable(struct intel_connector *connector)
62 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
63 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
70 if (shim->hdcp_capable) {
71 shim->hdcp_capable(intel_dig_port, &capable);
73 if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
80 /* Is HDCP2.2 capable on Platform and Sink */
81 static bool intel_hdcp2_capable(struct intel_connector *connector)
83 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
84 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
85 struct intel_hdcp *hdcp = &connector->hdcp;
88 /* I915 support for HDCP2.2 */
89 if (!hdcp->hdcp2_supported)
92 /* MEI interface is solid */
93 mutex_lock(&dev_priv->hdcp_comp_mutex);
94 if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
95 mutex_unlock(&dev_priv->hdcp_comp_mutex);
98 mutex_unlock(&dev_priv->hdcp_comp_mutex);
100 /* Sink's capability for HDCP2.2 */
101 hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
106 static inline bool intel_hdcp_in_use(struct intel_connector *connector)
108 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
109 enum port port = connector->encoder->port;
112 reg = I915_READ(PORT_HDCP_STATUS(port));
113 return reg & HDCP_STATUS_ENC;
116 static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
118 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
119 enum port port = connector->encoder->port;
122 reg = I915_READ(HDCP2_STATUS_DDI(port));
123 return reg & LINK_ENCRYPTION_STATUS;
126 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
127 const struct intel_hdcp_shim *shim)
132 /* Poll for ksv list ready (spec says max time allowed is 5s) */
133 ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
135 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
147 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
149 struct i915_power_domains *power_domains = &dev_priv->power_domains;
150 struct i915_power_well *power_well;
151 enum i915_power_well_id id;
152 bool enabled = false;
155 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
156 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
158 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
159 id = HSW_DISP_PW_GLOBAL;
163 mutex_lock(&power_domains->lock);
165 /* PG1 (power well #1) needs to be enabled */
166 for_each_power_well(dev_priv, power_well) {
167 if (power_well->desc->id == id) {
168 enabled = power_well->desc->ops->is_enabled(dev_priv,
173 mutex_unlock(&power_domains->lock);
176 * Another req for hdcp key loadability is enabled state of pll for
177 * cdclk. Without active crtc we wont land here. So we are assuming that
178 * cdclk is already on.
184 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
186 I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
187 I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
188 HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
191 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
196 val = I915_READ(HDCP_KEY_STATUS);
197 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
201 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
202 * out of reset. So if Key is not already loaded, its an error state.
204 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
205 if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
209 * Initiate loading the HDCP key from fuses.
211 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
212 * platforms except BXT and GLK, differ in the key load trigger process
213 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
215 if (IS_GEN9_BC(dev_priv)) {
216 mutex_lock(&dev_priv->pcu_lock);
217 ret = sandybridge_pcode_write(dev_priv,
218 SKL_PCODE_LOAD_HDCP_KEYS, 1);
219 mutex_unlock(&dev_priv->pcu_lock);
221 DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
226 I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
229 /* Wait for the keys to load (500us) */
230 ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
231 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
235 else if (!(val & HDCP_KEY_LOAD_STATUS))
238 /* Send Aksv over to PCH display for use in authentication */
239 I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
244 /* Returns updated SHA-1 index */
245 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
247 I915_WRITE(HDCP_SHA_TEXT, sha_text);
248 if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
249 HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
250 DRM_ERROR("Timed out waiting for SHA1 ready\n");
257 u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
259 enum port port = intel_dig_port->base.port;
262 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
264 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
266 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
268 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
270 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
274 DRM_ERROR("Unknown port %d\n", port);
/*
 * Validate the repeater's V' (HDCP1.4 Part 2): program the received V'
 * parts into HW, then stream the downstream KSV list plus BINFO/BSTATUS
 * and M0 through the HW SHA-1 engine and compare against V'.
 *
 * NOTE(review): this numbered listing is incomplete — several original
 * source lines are missing between the numbered lines (the line numbers
 * jump, e.g. 291 -> 294); do not treat it as compilable as-is.
 */
279 int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
280 const struct intel_hdcp_shim *shim,
281 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
283 struct drm_i915_private *dev_priv;
284 u32 vprime, sha_text, sha_leftovers, rep_ctl;
285 int ret, i, j, sha_idx;
287 dev_priv = intel_dig_port->base.base.dev->dev_private;
289 /* Process V' values from the receiver */
290 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
291 ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
294 I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
298 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
299 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
300 * stream is written via the HDCP_SHA_TEXT register in 32-bit
301 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
302 * index will keep track of our progress through the 64 bytes as well as
303 * helping us work the 40-bit KSVs through our 32-bit register.
305 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
/* Stage 1: pack each 40-bit KSV into 32-bit big-endian writes. */
310 rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
311 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
312 for (i = 0; i < num_downstream; i++) {
313 unsigned int sha_empty;
314 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
316 /* Fill up the empty slots in sha_text and write it out */
317 sha_empty = sizeof(sha_text) - sha_leftovers;
318 for (j = 0; j < sha_empty; j++)
319 sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
321 ret = intel_write_sha_text(dev_priv, sha_text);
325 /* Programming guide writes this every 64 bytes */
326 sha_idx += sizeof(sha_text);
328 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
330 /* Store the leftover bytes from the ksv in sha_text */
331 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
333 for (j = 0; j < sha_leftovers; j++)
334 sha_text |= ksv[sha_empty + j] <<
335 ((sizeof(sha_text) - j - 1) * 8);
338 * If we still have room in sha_text for more data, continue.
339 * Otherwise, write it out immediately.
341 if (sizeof(sha_text) > sha_leftovers)
344 ret = intel_write_sha_text(dev_priv, sha_text);
349 sha_idx += sizeof(sha_text);
/*
 * Stage 2: append BINFO/BSTATUS (2 bytes) and M0 (8 bytes, supplied by
 * HW via the HDCP_SHA1_TEXT_* width selects). The case split depends on
 * how many KSV bytes were left over from stage 1.
 */
353 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
354 * bytes are leftover from the last ksv, we might be able to fit them
355 * all in sha_text (first 2 cases), or we might need to split them up
356 * into 2 writes (last 2 cases).
358 if (sha_leftovers == 0) {
359 /* Write 16 bits of text, 16 bits of M0 */
360 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
361 ret = intel_write_sha_text(dev_priv,
362 bstatus[0] << 8 | bstatus[1]);
365 sha_idx += sizeof(sha_text);
367 /* Write 32 bits of M0 */
368 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
369 ret = intel_write_sha_text(dev_priv, 0);
372 sha_idx += sizeof(sha_text);
374 /* Write 16 bits of M0 */
375 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
376 ret = intel_write_sha_text(dev_priv, 0);
379 sha_idx += sizeof(sha_text);
381 } else if (sha_leftovers == 1) {
382 /* Write 24 bits of text, 8 bits of M0 */
383 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
384 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
385 /* Only 24-bits of data, must be in the LSB */
386 sha_text = (sha_text & 0xffffff00) >> 8;
387 ret = intel_write_sha_text(dev_priv, sha_text);
390 sha_idx += sizeof(sha_text);
392 /* Write 32 bits of M0 */
393 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
394 ret = intel_write_sha_text(dev_priv, 0);
397 sha_idx += sizeof(sha_text);
399 /* Write 24 bits of M0 */
400 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
401 ret = intel_write_sha_text(dev_priv, 0);
404 sha_idx += sizeof(sha_text);
406 } else if (sha_leftovers == 2) {
407 /* Write 32 bits of text */
408 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
409 sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
410 ret = intel_write_sha_text(dev_priv, sha_text);
413 sha_idx += sizeof(sha_text);
415 /* Write 64 bits of M0 */
416 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
417 for (i = 0; i < 2; i++) {
418 ret = intel_write_sha_text(dev_priv, 0);
421 sha_idx += sizeof(sha_text);
423 } else if (sha_leftovers == 3) {
424 /* Write 32 bits of text */
425 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
426 sha_text |= bstatus[0] << 24;
427 ret = intel_write_sha_text(dev_priv, sha_text);
430 sha_idx += sizeof(sha_text);
432 /* Write 8 bits of text, 24 bits of M0 */
433 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
434 ret = intel_write_sha_text(dev_priv, bstatus[1]);
437 sha_idx += sizeof(sha_text);
439 /* Write 32 bits of M0 */
440 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
441 ret = intel_write_sha_text(dev_priv, 0);
444 sha_idx += sizeof(sha_text);
446 /* Write 8 bits of M0 */
447 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
448 ret = intel_write_sha_text(dev_priv, 0);
451 sha_idx += sizeof(sha_text);
453 DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
/* Stage 3: zero-pad the SHA-1 block, then write the total bit length. */
458 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
459 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
460 while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
461 ret = intel_write_sha_text(dev_priv, 0);
464 sha_idx += sizeof(sha_text);
468 * Last write gets the length of the concatenation in bits. That is:
469 * - 5 bytes per device
470 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
472 sha_text = (num_downstream * 5 + 10) * 8;
473 ret = intel_write_sha_text(dev_priv, sha_text);
/* Stage 4: trigger hash completion and compare against V'. */
477 /* Tell the HW we're done with the hash and wait for it to ACK */
478 I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
479 if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
481 HDCP_SHA1_COMPLETE, 1)) {
482 DRM_ERROR("Timed out waiting for SHA1 complete\n");
485 if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
486 DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
493 /* Implements Part 2 of the HDCP authorization procedure */
/*
 * Repeater authentication: wait for the KSV FIFO, sanity-check BSTATUS
 * topology limits, read the downstream KSV list, and validate V'
 * (retrying per the DP spec). Rejects repeaters with 0 downstream
 * devices.
 *
 * NOTE(review): numbered listing is incomplete — lines are missing
 * between the numbered lines (e.g. 503 -> 507); not compilable as-is.
 */
495 int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
496 const struct intel_hdcp_shim *shim)
498 u8 bstatus[2], num_downstream, *ksv_fifo;
499 int ret, i, tries = 3;
501 ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
503 DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
507 ret = shim->read_bstatus(intel_dig_port, bstatus);
511 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
512 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
513 DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
518 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
519 * the HDCP encryption. That implies that repeater can't have its own
520 * display. As there is no consumption of encrypted content in the
521 * repeater with 0 downstream devices, we are failing the
524 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
525 if (num_downstream == 0)
/* ksv_fifo is heap-allocated: 5 bytes per downstream device. */
528 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
532 ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
537 * When V prime mismatches, DP Spec mandates re-read of
538 * V prime atleast twice.
540 for (i = 0; i < tries; i++) {
541 ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
542 ksv_fifo, num_downstream,
549 DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
553 DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
561 /* Implements Part 1 of the HDCP authorization procedure */
/*
 * HDCP1.4 Part 1: generate/capture An, exchange An/Aksv with the sink,
 * read and program a valid Bksv, handle repeater presence, enable
 * authentication+encryption, then verify R0 == R0' (with DP-mandated
 * retries) and wait for encryption confirmation. On a repeater, chains
 * into Part 2 (intel_hdcp_auth_downstream).
 *
 * NOTE(review): numbered listing is incomplete. In particular the three
 * `u8 shim[...]` fields at original lines 571/575/579 appear to belong
 * to anonymous union declarations (an/bksv/ri, with paired u32 reg[]
 * views used below) whose surrounding lines are missing — confirm
 * against the full file.
 */
562 static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
563 const struct intel_hdcp_shim *shim)
565 struct drm_i915_private *dev_priv;
567 unsigned long r0_prime_gen_start;
568 int ret, i, tries = 2;
571 u8 shim[DRM_HDCP_AN_LEN];
575 u8 shim[DRM_HDCP_KSV_LEN];
579 u8 shim[DRM_HDCP_RI_LEN];
581 bool repeater_present, hdcp_capable;
583 dev_priv = intel_dig_port->base.base.dev->dev_private;
585 port = intel_dig_port->base.port;
588 * Detects whether the display is HDCP capable. Although we check for
589 * valid Bksv below, the HDCP over DP spec requires that we check
590 * whether the display supports HDCP before we write An. For HDMI
591 * displays, this is not necessary.
593 if (shim->hdcp_capable) {
594 ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
598 DRM_DEBUG_KMS("Panel is not HDCP capable\n");
603 /* Initialize An with 2 random values and acquire it */
604 for (i = 0; i < 2; i++)
605 I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
606 I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
608 /* Wait for An to be acquired */
609 if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
610 HDCP_STATUS_AN_READY,
611 HDCP_STATUS_AN_READY, 1)) {
612 DRM_ERROR("Timed out waiting for An\n");
616 an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
617 an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
618 ret = shim->write_an_aksv(intel_dig_port, an.shim);
/* R0' becomes valid relative to the Aksv write; timestamp it now. */
622 r0_prime_gen_start = jiffies;
624 memset(&bksv, 0, sizeof(bksv));
626 ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
630 I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
631 I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
633 ret = shim->repeater_present(intel_dig_port, &repeater_present);
636 if (repeater_present)
637 I915_WRITE(HDCP_REP_CTL,
638 intel_hdcp_get_repeater_ctl(intel_dig_port));
640 ret = shim->toggle_signalling(intel_dig_port, true);
644 I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);
646 /* Wait for R0 ready */
647 if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
648 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
649 DRM_ERROR("Timed out waiting for R0 ready\n");
654 * Wait for R0' to become available. The spec says 100ms from Aksv, but
655 * some monitors can take longer than this. We'll set the timeout at
656 * 300ms just to be sure.
658 * On DP, there's an R0_READY bit available but no such bit
659 * exists on HDMI. Since the upper-bound is the same, we'll just do
660 * the stupid thing instead of polling on one and not the other.
662 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
667 * DP HDCP Spec mandates the two more reattempt to read R0, incase
670 for (i = 0; i < tries; i++) {
672 ret = shim->read_ri_prime(intel_dig_port, ri.shim);
675 I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);
677 /* Wait for Ri prime match */
678 if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
679 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
684 DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
685 I915_READ(PORT_HDCP_STATUS(port)));
689 /* Wait for encryption confirmation */
690 if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
691 HDCP_STATUS_ENC, HDCP_STATUS_ENC,
692 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
693 DRM_ERROR("Timed out waiting for encryption\n");
698 * XXX: If we have MST-connected devices, we need to enable encryption
702 if (repeater_present)
703 return intel_hdcp_auth_downstream(intel_dig_port, shim);
705 DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
709 static int _intel_hdcp_disable(struct intel_connector *connector)
711 struct intel_hdcp *hdcp = &connector->hdcp;
712 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
713 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
714 enum port port = intel_dig_port->base.port;
717 DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
718 connector->base.name, connector->base.base.id);
720 hdcp->hdcp_encrypted = false;
721 I915_WRITE(PORT_HDCP_CONF(port), 0);
722 if (intel_wait_for_register(&dev_priv->uncore,
723 PORT_HDCP_STATUS(port), ~0, 0,
724 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
725 DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
729 ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
731 DRM_ERROR("Failed to disable HDCP signalling\n");
735 DRM_DEBUG_KMS("HDCP is disabled\n");
739 static int _intel_hdcp_enable(struct intel_connector *connector)
741 struct intel_hdcp *hdcp = &connector->hdcp;
742 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
743 int i, ret, tries = 3;
745 DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
746 connector->base.name, connector->base.base.id);
748 if (!hdcp_key_loadable(dev_priv)) {
749 DRM_ERROR("HDCP key Load is not possible\n");
753 for (i = 0; i < KEY_LOAD_TRIES; i++) {
754 ret = intel_hdcp_load_keys(dev_priv);
757 intel_hdcp_clear_keys(dev_priv);
760 DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
764 /* Incase of authentication failures, HDCP spec expects reauth. */
765 for (i = 0; i < tries; i++) {
766 ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
768 hdcp->hdcp_encrypted = true;
772 DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
774 /* Ensuring HDCP encryption and signalling are stopped. */
775 _intel_hdcp_disable(connector);
778 DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
783 struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
785 return container_of(hdcp, struct intel_connector, hdcp);
788 /* Implements Part 3 of the HDCP authorization procedure */
/*
 * Periodic/IRQ-driven link check: under hdcp->mutex, verify HW still
 * encrypts and the shim's link check passes; on failure, disable and
 * re-enable HDCP, updating the content-protection property via
 * prop_work in every path that changes hdcp->value.
 *
 * NOTE(review): numbered listing is incomplete — lines are missing
 * between the numbered lines (e.g. 801 -> 806); not compilable as-is.
 */
789 static int intel_hdcp_check_link(struct intel_connector *connector)
791 struct intel_hdcp *hdcp = &connector->hdcp;
792 struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
793 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
794 enum port port = intel_dig_port->base.port;
797 mutex_lock(&hdcp->mutex);
799 /* Check_link valid only when HDCP1.4 is enabled */
800 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
801 !hdcp->hdcp_encrypted) {
/* HW dropped encryption behind our back: demote to DESIRED. */
806 if (WARN_ON(!intel_hdcp_in_use(connector))) {
807 DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
808 connector->base.name, connector->base.base.id,
809 I915_READ(PORT_HDCP_STATUS(port)));
811 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
812 schedule_work(&hdcp->prop_work);
816 if (hdcp->shim->check_link(intel_dig_port)) {
817 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
818 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
819 schedule_work(&hdcp->prop_work);
824 DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
825 connector->base.name, connector->base.base.id);
827 ret = _intel_hdcp_disable(connector);
829 DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
830 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
831 schedule_work(&hdcp->prop_work);
835 ret = _intel_hdcp_enable(connector);
837 DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
838 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
839 schedule_work(&hdcp->prop_work);
844 mutex_unlock(&hdcp->mutex);
848 static void intel_hdcp_prop_work(struct work_struct *work)
850 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
852 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
853 struct drm_device *dev = connector->base.dev;
854 struct drm_connector_state *state;
856 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
857 mutex_lock(&hdcp->mutex);
860 * This worker is only used to flip between ENABLED/DESIRED. Either of
861 * those to UNDESIRED is handled by core. If value == UNDESIRED,
862 * we're running just after hdcp has been disabled, so just exit
864 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
865 state = connector->base.state;
866 state->content_protection = hdcp->value;
869 mutex_unlock(&hdcp->mutex);
870 drm_modeset_unlock(&dev->mode_config.connection_mutex);
873 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
875 /* PORT E doesn't have HDCP, and PORT F is disabled */
876 return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
/*
 * Ask the MEI HDCP component (under hdcp_comp_mutex) to start an
 * HDCP2.2 session and produce the AKE_Init message in @ake_data.
 *
 * NOTE(review): numbered listing is incomplete — the `static int`
 * line preceding 880, `int ret;`, error returns and final return are
 * among the missing lines.
 */
880 hdcp2_prepare_ake_init(struct intel_connector *connector,
881 struct hdcp2_ake_init *ake_data)
883 struct hdcp_port_data *data = &connector->hdcp.port_data;
884 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
885 struct i915_hdcp_comp_master *comp;
888 mutex_lock(&dev_priv->hdcp_comp_mutex);
889 comp = dev_priv->hdcp_master;
891 if (!comp || !comp->ops) {
892 mutex_unlock(&dev_priv->hdcp_comp_mutex);
896 ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
898 DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
899 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Hand the receiver certificate (AKE_Send_Cert) to the MEI component
 * for verification and get back the Ek_pub(km) message to send to the
 * sink.
 *
 * NOTE(review): numbered listing is incomplete — a parameter line
 * (likely `bool *paired`) and the size out-parameter line between 906
 * and 908/911 are missing, along with `int ret;` and returns.
 */
905 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
906 struct hdcp2_ake_send_cert *rx_cert,
908 struct hdcp2_ake_no_stored_km *ek_pub_km,
911 struct hdcp_port_data *data = &connector->hdcp.port_data;
912 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
913 struct i915_hdcp_comp_master *comp;
916 mutex_lock(&dev_priv->hdcp_comp_mutex);
917 comp = dev_priv->hdcp_master;
919 if (!comp || !comp->ops) {
920 mutex_unlock(&dev_priv->hdcp_comp_mutex);
924 ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
928 DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
929 mutex_unlock(&dev_priv->hdcp_comp_mutex);
934 static int hdcp2_verify_hprime(struct intel_connector *connector,
935 struct hdcp2_ake_send_hprime *rx_hprime)
937 struct hdcp_port_data *data = &connector->hdcp.port_data;
938 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
939 struct i915_hdcp_comp_master *comp;
942 mutex_lock(&dev_priv->hdcp_comp_mutex);
943 comp = dev_priv->hdcp_master;
945 if (!comp || !comp->ops) {
946 mutex_unlock(&dev_priv->hdcp_comp_mutex);
950 ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
952 DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
953 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Persist the sink's AKE_Send_Pairing_Info via the MEI HDCP component,
 * under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — the `static int` line
 * preceding 959, `int ret;`, error returns and final return are
 * missing.
 */
959 hdcp2_store_pairing_info(struct intel_connector *connector,
960 struct hdcp2_ake_send_pairing_info *pairing_info)
962 struct hdcp_port_data *data = &connector->hdcp.port_data;
963 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
964 struct i915_hdcp_comp_master *comp;
967 mutex_lock(&dev_priv->hdcp_comp_mutex);
968 comp = dev_priv->hdcp_master;
970 if (!comp || !comp->ops) {
971 mutex_unlock(&dev_priv->hdcp_comp_mutex);
975 ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
977 DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
978 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Ask the MEI HDCP component to initiate the locality check and fill
 * in the LC_Init message, under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — the `static int` line
 * preceding 984, `int ret;`, error returns and final return are
 * missing.
 */
984 hdcp2_prepare_lc_init(struct intel_connector *connector,
985 struct hdcp2_lc_init *lc_init)
987 struct hdcp_port_data *data = &connector->hdcp.port_data;
988 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
989 struct i915_hdcp_comp_master *comp;
992 mutex_lock(&dev_priv->hdcp_comp_mutex);
993 comp = dev_priv->hdcp_master;
995 if (!comp || !comp->ops) {
996 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1000 ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1002 DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
1003 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Forward the sink's LC_Send_L_prime message to the MEI HDCP component
 * for verification, under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — the `static int` line
 * preceding 1009, `int ret;`, error returns and final return are
 * missing.
 */
1009 hdcp2_verify_lprime(struct intel_connector *connector,
1010 struct hdcp2_lc_send_lprime *rx_lprime)
1012 struct hdcp_port_data *data = &connector->hdcp.port_data;
1013 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1014 struct i915_hdcp_comp_master *comp;
1017 mutex_lock(&dev_priv->hdcp_comp_mutex);
1018 comp = dev_priv->hdcp_master;
1020 if (!comp || !comp->ops) {
1021 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1025 ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1027 DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
1028 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Ask the MEI HDCP component for the session key (SKE_Send_Eks
 * message), under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — `int ret;`, error
 * returns and the final return are missing.
 */
1033 static int hdcp2_prepare_skey(struct intel_connector *connector,
1034 struct hdcp2_ske_send_eks *ske_data)
1036 struct hdcp_port_data *data = &connector->hdcp.port_data;
1037 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1038 struct i915_hdcp_comp_master *comp;
1041 mutex_lock(&dev_priv->hdcp_comp_mutex);
1042 comp = dev_priv->hdcp_master;
1044 if (!comp || !comp->ops) {
1045 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1049 ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1051 DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
1052 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Hand the repeater's ReceiverID list to the MEI HDCP component for
 * topology verification and receive the RepeaterAuth_Send_Ack to relay
 * back to the repeater, under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — the parameter name
 * line between 1059 and 1061 (the receiverid-list argument), the
 * `static int` line, `int ret;` and returns are missing.
 */
1058 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1059 struct hdcp2_rep_send_receiverid_list
1061 struct hdcp2_rep_send_ack *rep_send_ack)
1063 struct hdcp_port_data *data = &connector->hdcp.port_data;
1064 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1065 struct i915_hdcp_comp_master *comp;
1068 mutex_lock(&dev_priv->hdcp_comp_mutex);
1069 comp = dev_priv->hdcp_master;
1071 if (!comp || !comp->ops) {
1072 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1076 ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1080 DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
1081 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Forward the repeater's RepeaterAuth_Stream_Ready (M') to the MEI
 * HDCP component for verification, under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — the `static int` line
 * preceding 1087, `int ret;`, error returns and final return are
 * missing.
 */
1087 hdcp2_verify_mprime(struct intel_connector *connector,
1088 struct hdcp2_rep_stream_ready *stream_ready)
1090 struct hdcp_port_data *data = &connector->hdcp.port_data;
1091 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1092 struct i915_hdcp_comp_master *comp;
1095 mutex_lock(&dev_priv->hdcp_comp_mutex);
1096 comp = dev_priv->hdcp_master;
1098 if (!comp || !comp->ops) {
1099 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1103 ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1105 DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
1106 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Tell the MEI HDCP component that authentication succeeded so it can
 * enable HDCP on the port, under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — `int ret;`, error
 * returns and the final return are missing.
 */
1111 static int hdcp2_authenticate_port(struct intel_connector *connector)
1113 struct hdcp_port_data *data = &connector->hdcp.port_data;
1114 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1115 struct i915_hdcp_comp_master *comp;
1118 mutex_lock(&dev_priv->hdcp_comp_mutex);
1119 comp = dev_priv->hdcp_master;
1121 if (!comp || !comp->ops) {
1122 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1126 ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1128 DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
1129 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Close the HDCP2.2 session held by the MEI HDCP component for this
 * connector's port, under hdcp_comp_mutex.
 *
 * NOTE(review): numbered listing is incomplete — `int ret;`, error
 * returns and the final return are missing.
 */
1134 static int hdcp2_close_mei_session(struct intel_connector *connector)
1136 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1137 struct i915_hdcp_comp_master *comp;
1140 mutex_lock(&dev_priv->hdcp_comp_mutex);
1141 comp = dev_priv->hdcp_master;
1143 if (!comp || !comp->ops) {
1144 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1148 ret = comp->ops->close_hdcp_session(comp->mei_dev,
1149 &connector->hdcp.port_data);
1150 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Deauthenticate the port: simply tear down the MEI HDCP session.
 */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1160 /* Authentication flow starts from here */
/*
 * HDCP2.2 AKE: send AKE_Init, read the receiver certificate, verify it
 * and send Ek_pub(km), verify H', and (when not previously paired)
 * read and store the pairing info.
 *
 * NOTE(review): numbered listing is incomplete — the `union { ... } msgs`
 * declaration wrapping lines 1166-1170, `int ret;`, `size_t size;` and
 * the error/return paths are among the missing lines; not compilable
 * as-is.
 */
1161 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1163 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1164 struct intel_hdcp *hdcp = &connector->hdcp;
1166 struct hdcp2_ake_init ake_init;
1167 struct hdcp2_ake_send_cert send_cert;
1168 struct hdcp2_ake_no_stored_km no_stored_km;
1169 struct hdcp2_ake_send_hprime send_hprime;
1170 struct hdcp2_ake_send_pairing_info pairing_info;
1172 const struct intel_hdcp_shim *shim = hdcp->shim;
1176 /* Init for seq_num */
1177 hdcp->seq_num_v = 0;
1178 hdcp->seq_num_m = 0;
1180 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1184 ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
1185 sizeof(msgs.ake_init));
1189 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
1190 &msgs.send_cert, sizeof(msgs.send_cert));
1194 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
1197 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1200 * Here msgs.no_stored_km will hold msgs corresponding to the km
1203 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1205 &msgs.no_stored_km, &size);
1209 ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
1213 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1214 &msgs.send_hprime, sizeof(msgs.send_hprime));
1218 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1222 if (!hdcp->is_paired) {
1223 /* Pairing is required */
1224 ret = shim->read_2_2_msg(intel_dig_port,
1225 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1227 sizeof(msgs.pairing_info));
1231 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1234 hdcp->is_paired = true;
/*
 * HDCP2.2 locality check: up to HDCP2_LC_RETRY_CNT rounds of preparing
 * LC_Init, sending it, reading LC_Send_L_prime and verifying L' via
 * the MEI component.
 *
 * NOTE(review): numbered listing is incomplete — the `union { ... } msgs`
 * wrapper around lines 1245-1246, the loop's continue/break logic and
 * the final return are among the missing lines.
 */
1240 static int hdcp2_locality_check(struct intel_connector *connector)
1242 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1243 struct intel_hdcp *hdcp = &connector->hdcp;
1245 struct hdcp2_lc_init lc_init;
1246 struct hdcp2_lc_send_lprime send_lprime;
1248 const struct intel_hdcp_shim *shim = hdcp->shim;
1249 int tries = HDCP2_LC_RETRY_CNT, ret, i;
1251 for (i = 0; i < tries; i++) {
1252 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1256 ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
1257 sizeof(msgs.lc_init));
1261 ret = shim->read_2_2_msg(intel_dig_port,
1262 HDCP_2_2_LC_SEND_LPRIME,
1264 sizeof(msgs.send_lprime));
1268 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1276 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1278 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1279 struct intel_hdcp *hdcp = &connector->hdcp;
1280 struct hdcp2_ske_send_eks send_eks;
1283 ret = hdcp2_prepare_skey(connector, &send_eks);
1287 ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
/*
 * Build and send RepeaterAuth_Stream_Manage (single stream, type taken
 * from hdcp->content_type), read RepeaterAuth_Stream_Ready back, and
 * verify M' via the MEI component. Fails when seq_num_m would roll
 * over.
 *
 * NOTE(review): numbered listing is incomplete — the `union { ... } msgs`
 * wrapper around lines 1301-1302, the seq_num_m increment, and the
 * error/return paths are among the missing lines.
 */
1296 int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1298 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1299 struct intel_hdcp *hdcp = &connector->hdcp;
1301 struct hdcp2_rep_stream_manage stream_manage;
1302 struct hdcp2_rep_stream_ready stream_ready;
1304 const struct intel_hdcp_shim *shim = hdcp->shim;
1307 /* Prepare RepeaterAuth_Stream_Manage msg */
1308 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1309 drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1311 /* K no of streams is fixed as 1. Stored as big-endian. */
1312 msgs.stream_manage.k = cpu_to_be16(1);
1314 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
1315 msgs.stream_manage.streams[0].stream_id = 0;
1316 msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
1318 /* Send it to Repeater */
1319 ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
1320 sizeof(msgs.stream_manage));
1324 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
1325 &msgs.stream_ready, sizeof(msgs.stream_ready));
1329 hdcp->port_data.seq_num_m = hdcp->seq_num_m;
1330 hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1332 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1338 if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1339 DRM_DEBUG_KMS("seq_num_m roll over.\n");
/*
 * Read the repeater's RepeaterAuth_Send_ReceiverID_List, sanity-check the
 * reported topology (cascade depth / device count limits), verify the
 * list with the MEI firmware and send back RepeaterAuth_Send_Ack.
 * Returns 0 on success, negative errno on failure.
 */
1347 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1349 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1350 struct intel_hdcp *hdcp = &connector->hdcp;
1352 struct hdcp2_rep_send_receiverid_list recvid_list;
1353 struct hdcp2_rep_send_ack rep_ack;
1355 const struct intel_hdcp_shim *shim = hdcp->shim;
1360 ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1361 &msgs.recvid_list, sizeof(msgs.recvid_list));
1365 rx_info = msgs.recvid_list.rx_info;
/* Spec caps cascade depth and downstream device count; bail if exceeded. */
1367 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1368 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1369 DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
1373 /* Converting and Storing the seq_num_v to local variable as DWORD */
1374 seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
/* seq_num_v must be monotonically non-decreasing from the repeater. */
1376 if (seq_num_v < hdcp->seq_num_v) {
1377 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1378 DRM_DEBUG_KMS("Seq_num_v roll over.\n");
/* Firmware checks V' over the receiver-ID list and builds the ack. */
1382 ret = hdcp2_verify_rep_topology_prepare_ack(connector,
/* Only commit the new seq_num_v once the topology has been verified. */
1388 hdcp->seq_num_v = seq_num_v;
1389 ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
1390 sizeof(msgs.rep_ack));
/*
 * Repeater authentication: verify the downstream topology first, then
 * propagate the stream management info. Returns 0 on success.
 */
1397 static int hdcp2_authenticate_repeater(struct intel_connector *connector)
1401 ret = hdcp2_authenticate_repeater_topology(connector);
1405 return hdcp2_propagate_stream_management_info(connector);
/*
 * Full HDCP2.2 sink authentication sequence:
 *   AKE -> Locality Check -> SKE -> (optional) stream-type config ->
 *   (repeater-only) repeater auth -> port authentication via firmware.
 * Each stage logs its own failure; returns 0 on success.
 */
1408 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1410 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1411 struct intel_hdcp *hdcp = &connector->hdcp;
1412 const struct intel_hdcp_shim *shim = hdcp->shim;
/* Authentication and Key Exchange: establishes the master key. */
1415 ret = hdcp2_authentication_key_exchange(connector);
1417 DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
1421 ret = hdcp2_locality_check(connector);
1423 DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
1427 ret = hdcp2_session_key_exchange(connector);
1429 DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
/* Optional shim hook: some protocols need the stream type programmed. */
1433 if (shim->config_stream_type) {
1434 ret = shim->config_stream_type(intel_dig_port,
1436 hdcp->content_type);
/* Repeater-only leg of the protocol (topology + stream manage). */
1441 if (hdcp->is_repeater) {
1442 ret = hdcp2_authenticate_repeater(connector);
1444 DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
1449 hdcp->port_data.streams[0].stream_type = hdcp->content_type;
1450 ret = hdcp2_authenticate_port(connector);
/*
 * Enable HDCP2.2 link encryption on an already-authenticated port:
 * optionally toggle HDCP signalling via the shim, request encryption in
 * HDCP2_CTL_DDI, then poll HDCP2_STATUS_DDI for the encryption-live bit.
 * Returns 0 on success, negative errno on failure/timeout.
 */
1457 static int hdcp2_enable_encryption(struct intel_connector *connector)
1459 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1460 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1461 struct intel_hdcp *hdcp = &connector->hdcp;
1462 enum port port = connector->encoder->port;
/* Encryption must not already be running when we get here. */
1465 WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
1467 if (hdcp->shim->toggle_signalling) {
1468 ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
1470 DRM_ERROR("Failed to enable HDCP signalling. %d\n",
1476 if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
1477 /* Link is Authenticated. Now set for Encryption */
1478 I915_WRITE(HDCP2_CTL_DDI(port),
1479 I915_READ(HDCP2_CTL_DDI(port)) |
1480 CTL_LINK_ENCRYPTION_REQ)
/* Wait (up to 50ms) for hardware to report encryption active. */
1483 ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
1484 LINK_ENCRYPTION_STATUS,
1485 LINK_ENCRYPTION_STATUS,
1486 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
/*
 * Disable HDCP2.2 link encryption: clear the encryption request bit in
 * HDCP2_CTL_DDI, wait for the status bit to drop, then optionally turn
 * off HDCP signalling via the shim. Returns 0 on success.
 */
1491 static int hdcp2_disable_encryption(struct intel_connector *connector)
1493 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1494 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1495 struct intel_hdcp *hdcp = &connector->hdcp;
1496 enum port port = connector->encoder->port;
/* Encryption is expected to be live when disabling. */
1499 WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
1501 I915_WRITE(HDCP2_CTL_DDI(port),
1502 I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
/* Wait (up to 50ms) for the encryption-live bit to clear. */
1504 ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
1505 LINK_ENCRYPTION_STATUS, 0x0,
1506 ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
/* Timeout is logged but does not abort the teardown path. */
1507 if (ret == -ETIMEDOUT)
1508 DRM_DEBUG_KMS("Disable Encryption Timedout");
1510 if (hdcp->shim->toggle_signalling) {
1511 ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
1513 DRM_ERROR("Failed to disable HDCP signalling. %d\n",
/*
 * Authenticate the sink and turn on encryption, retrying the whole
 * authentication up to 3 times. On any failure the MEI session is torn
 * down (deauthenticated) before the next attempt. Returns 0 on success.
 */
1522 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1524 int ret, i, tries = 3;
1526 for (i = 0; i < tries; i++) {
1527 ret = hdcp2_authenticate_sink(connector);
1531 /* Clearing the mei hdcp session */
1532 DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
1534 if (hdcp2_deauthenticate_port(connector) < 0)
1535 DRM_DEBUG_KMS("Port deauth failed.\n");
1540 * Ensuring the required 200mSec min time interval between
1541 * Session Key Exchange and encryption.
1543 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1544 ret = hdcp2_enable_encryption(connector);
1546 DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
/* Encryption failure also invalidates the firmware session. */
1547 if (hdcp2_deauthenticate_port(connector) < 0)
1548 DRM_DEBUG_KMS("Port deauth failed.\n");
/*
 * Enable HDCP2.2 for the connector (caller holds hdcp->mutex — follows
 * the pattern of intel_hdcp_enable()). On success marks the connector as
 * hdcp2_encrypted. Returns 0 on success, negative errno on failure.
 */
1555 static int _intel_hdcp2_enable(struct intel_connector *connector)
1557 struct intel_hdcp *hdcp = &connector->hdcp;
1560 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1561 connector->base.name, connector->base.base.id,
1562 hdcp->content_type);
1564 ret = hdcp2_authenticate_and_encrypt(connector);
1566 DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
1567 hdcp->content_type, ret);
1571 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
1572 connector->base.name, connector->base.base.id,
1573 hdcp->content_type);
1575 hdcp->hdcp2_encrypted = true;
/*
 * Disable HDCP2.2 for the connector: stop link encryption, deauth the
 * firmware session, and clear the hdcp2_encrypted flag.
 */
1579 static int _intel_hdcp2_disable(struct intel_connector *connector)
1583 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
1584 connector->base.name, connector->base.base.id);
1586 ret = hdcp2_disable_encryption(connector);
/* Deauth failure is logged only; the disable itself still proceeds. */
1588 if (hdcp2_deauthenticate_port(connector) < 0)
1589 DRM_DEBUG_KMS("Port deauth failed.\n");
1591 connector->hdcp.hdcp2_encrypted = false;
1596 /* Implements the Link Integrity Check for HDCP2.2 */
/*
 * Periodic HDCP2.2 link check. Verifies hardware still reports
 * encryption, then asks the shim for the link state:
 *  - HDCP_LINK_PROTECTED: re-assert ENABLED property if needed.
 *  - HDCP_TOPOLOGY_CHANGE: re-run repeater topology auth only, without
 *    disabling encryption.
 *  - anything else: full disable + re-enable cycle.
 * On unrecoverable failure the content-protection property is dropped to
 * DESIRED and userspace is notified via prop_work.
 */
1597 static int intel_hdcp2_check_link(struct intel_connector *connector)
1599 struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
1600 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1601 struct intel_hdcp *hdcp = &connector->hdcp;
1602 enum port port = connector->encoder->port;
1605 mutex_lock(&hdcp->mutex);
1607 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
1608 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1609 !hdcp->hdcp2_encrypted) {
/* Hardware dropped encryption behind our back: report and flag DESIRED. */
1614 if (WARN_ON(!intel_hdcp2_in_use(connector))) {
1615 DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
1616 I915_READ(HDCP2_STATUS_DDI(port)));
1618 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1619 schedule_work(&hdcp->prop_work);
1623 ret = hdcp->shim->check_2_2_link(intel_dig_port);
1624 if (ret == HDCP_LINK_PROTECTED) {
1625 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1626 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1627 schedule_work(&hdcp->prop_work);
1632 if (ret == HDCP_TOPOLOGY_CHANGE) {
1633 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1636 DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
/* Topology change only needs the repeater leg re-authenticated. */
1637 ret = hdcp2_authenticate_repeater_topology(connector);
1639 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1640 schedule_work(&hdcp->prop_work);
1643 DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
1644 connector->base.name, connector->base.base.id,
1647 DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
1648 connector->base.name, connector->base.base.id);
/* Link integrity lost: tear down and attempt a fresh enable. */
1651 ret = _intel_hdcp2_disable(connector);
1653 DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
1654 connector->base.name, connector->base.base.id, ret);
1655 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1656 schedule_work(&hdcp->prop_work);
1660 ret = _intel_hdcp2_enable(connector);
1662 DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
1663 connector->base.name, connector->base.base.id,
1665 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1666 schedule_work(&hdcp->prop_work);
1671 mutex_unlock(&hdcp->mutex);
/*
 * Delayed-work handler for periodic link checks. Tries the HDCP2.2 check
 * first; if that path is not applicable, falls back to the HDCP1.4 check.
 * Re-arms itself with the protocol-appropriate polling period.
 */
1675 static void intel_hdcp_check_work(struct work_struct *work)
1677 struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1680 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1682 if (!intel_hdcp2_check_link(connector))
1683 schedule_delayed_work(&hdcp->check_work,
1684 DRM_HDCP2_CHECK_PERIOD_MS);
1685 else if (!intel_hdcp_check_link(connector))
1686 schedule_delayed_work(&hdcp->check_work,
1687 DRM_HDCP_CHECK_PERIOD_MS);
/*
 * Component-framework bind callback: the MEI HDCP driver hands us its
 * ops/master structure via @data; stash it (and the mei device) under
 * hdcp_comp_mutex for later firmware calls.
 */
1690 static int i915_hdcp_component_bind(struct device *i915_kdev,
1691 struct device *mei_kdev, void *data)
1693 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1695 DRM_DEBUG("I915 HDCP comp bind\n");
1696 mutex_lock(&dev_priv->hdcp_comp_mutex);
1697 dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1698 dev_priv->hdcp_master->mei_dev = mei_kdev;
1699 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Component-framework unbind callback: drop our reference to the MEI
 * HDCP master so no further firmware calls are attempted.
 */
1704 static void i915_hdcp_component_unbind(struct device *i915_kdev,
1705 struct device *mei_kdev, void *data)
1707 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1709 DRM_DEBUG("I915 HDCP comp unbind\n");
1710 mutex_lock(&dev_priv->hdcp_comp_mutex);
1711 dev_priv->hdcp_master = NULL;
1712 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/* Ops registered with the component framework for the MEI HDCP pairing. */
1715 static const struct component_ops i915_hdcp_component_ops = {
1716 .bind = i915_hdcp_component_bind,
1717 .unbind = i915_hdcp_component_unbind,
/*
 * Populate hdcp->port_data, the per-port descriptor handed to the MEI
 * firmware: port, port type, protocol, and a kcalloc'd single-entry
 * stream table. Returns 0 on success, negative errno on allocation
 * failure. The streams array is freed in intel_hdcp_cleanup().
 */
1720 static inline int initialize_hdcp_port_data(struct intel_connector *connector)
1722 struct intel_hdcp *hdcp = &connector->hdcp;
1723 struct hdcp_port_data *data = &hdcp->port_data;
1725 data->port = connector->encoder->port;
1726 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
1727 data->protocol = (u8)hdcp->shim->protocol;
1731 data->streams = kcalloc(data->k,
1732 sizeof(struct hdcp2_streamid_type),
1734 if (!data->streams) {
1735 DRM_ERROR("Out of Memory\n");
1739 data->streams[0].stream_id = 0;
1740 data->streams[0].stream_type = hdcp->content_type;
/*
 * Platform gate for HDCP2.2: requires the MEI HDCP driver to be built
 * (CONFIG_INTEL_MEI_HDCP) and Gen10+, Geminilake, or Kabylake hardware.
 */
1745 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
1747 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
1750 return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
1751 IS_KABYLAKE(dev_priv));
/*
 * Register i915 with the component framework so the MEI HDCP driver can
 * bind to us. hdcp_comp_added is set optimistically before the add and
 * rolled back (under the mutex) if component_add_typed() fails.
 */
1754 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
1758 if (!is_hdcp2_supported(dev_priv))
1761 mutex_lock(&dev_priv->hdcp_comp_mutex);
1762 WARN_ON(dev_priv->hdcp_comp_added);
1764 dev_priv->hdcp_comp_added = true;
1765 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1766 ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
1767 I915_COMPONENT_HDCP);
1769 DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
1770 mutex_lock(&dev_priv->hdcp_comp_mutex);
1771 dev_priv->hdcp_comp_added = false;
1772 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Per-connector HDCP2.2 setup: initialize the firmware port data and,
 * when that succeeds, mark the connector as HDCP2.2-capable.
 */
1777 static void intel_hdcp2_init(struct intel_connector *connector)
1779 struct intel_hdcp *hdcp = &connector->hdcp;
1782 ret = initialize_hdcp_port_data(connector);
1784 DRM_DEBUG_KMS("Mei hdcp data init failed\n");
1788 hdcp->hdcp2_supported = true;
/*
 * One-time HDCP init for a connector: attach the content-protection
 * property, set up the mutex/works, optionally enable the HDCP2.2 path,
 * and prepare the CP_IRQ wait queue. Returns 0 on success.
 */
1791 int intel_hdcp_init(struct intel_connector *connector,
1792 const struct intel_hdcp_shim *shim)
1794 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1795 struct intel_hdcp *hdcp = &connector->hdcp;
1801 ret = drm_connector_attach_content_protection_property(&connector->base);
1806 mutex_init(&hdcp->mutex);
1807 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
1808 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
1810 if (is_hdcp2_supported(dev_priv))
1811 intel_hdcp2_init(connector);
/* Used by the DP shim to wait for sink CP_IRQ notifications. */
1812 init_waitqueue_head(&hdcp->cp_irq_queue);
/*
 * Enable content protection on the connector, preferring HDCP2.2 and
 * falling back to HDCP1.4. On success, arms the periodic link-check work
 * (with the protocol-specific interval) and publishes ENABLED via
 * prop_work. Returns 0 on success, negative errno on failure.
 */
1817 int intel_hdcp_enable(struct intel_connector *connector)
1819 struct intel_hdcp *hdcp = &connector->hdcp;
1820 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
1826 mutex_lock(&hdcp->mutex);
1827 WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
1830 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
1831 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
1833 if (intel_hdcp2_capable(connector)) {
1834 ret = _intel_hdcp2_enable(connector);
1836 check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
1839 /* When HDCP2.2 fails, HDCP1.4 will be attempted */
1840 if (ret && intel_hdcp_capable(connector)) {
1841 ret = _intel_hdcp_enable(connector);
1845 schedule_delayed_work(&hdcp->check_work, check_link_interval);
1846 hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
1847 schedule_work(&hdcp->prop_work);
1850 mutex_unlock(&hdcp->mutex);
/*
 * Disable content protection on the connector: mark it UNDESIRED and
 * tear down whichever protocol (2.2 or 1.4) is currently encrypting.
 * The link-check work is cancelled outside the mutex to avoid
 * deadlocking with a running check that takes hdcp->mutex itself.
 */
1854 int intel_hdcp_disable(struct intel_connector *connector)
1856 struct intel_hdcp *hdcp = &connector->hdcp;
1862 mutex_lock(&hdcp->mutex);
1864 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1865 hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
1866 if (hdcp->hdcp2_encrypted)
1867 ret = _intel_hdcp2_disable(connector);
1868 else if (hdcp->hdcp_encrypted)
1869 ret = _intel_hdcp_disable(connector);
1872 mutex_unlock(&hdcp->mutex);
1873 cancel_delayed_work_sync(&hdcp->check_work);
/*
 * Unregister from the component framework. component_del() is called
 * outside the mutex; hdcp_comp_added guards against double removal.
 */
1877 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
1879 mutex_lock(&dev_priv->hdcp_comp_mutex);
1880 if (!dev_priv->hdcp_comp_added) {
1881 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1885 dev_priv->hdcp_comp_added = false;
1886 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1888 component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
/*
 * Per-connector teardown: free the stream table allocated by
 * initialize_hdcp_port_data(). A NULL shim means HDCP was never set up
 * on this connector, so there is nothing to free.
 */
1891 void intel_hdcp_cleanup(struct intel_connector *connector)
1893 if (!connector->hdcp.shim)
1896 mutex_lock(&connector->hdcp.mutex);
1897 kfree(connector->hdcp.port_data.streams);
1898 mutex_unlock(&connector->hdcp.mutex);
/*
 * Atomic-check hook for the content-protection property. Keeps the
 * property state sane across modesets:
 *  - connector being disabled with CP ENABLED -> downgrade to DESIRED so
 *    protection is re-established when the connector comes back;
 *  - a real userspace-visible change -> force a full modeset by setting
 *    crtc_state->mode_changed (DESIRED->ENABLED is a kernel-driven
 *    transition and is exempt).
 */
1901 void intel_hdcp_atomic_check(struct drm_connector *connector,
1902 struct drm_connector_state *old_state,
1903 struct drm_connector_state *new_state)
1905 u64 old_cp = old_state->content_protection;
1906 u64 new_cp = new_state->content_protection;
1907 struct drm_crtc_state *crtc_state;
1909 if (!new_state->crtc) {
1911 * If the connector is being disabled with CP enabled, mark it
1912 * desired so it's re-enabled when the connector is brought back
1914 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1915 new_state->content_protection =
1916 DRM_MODE_CONTENT_PROTECTION_DESIRED;
1921 * Nothing to do if the state didn't change, or HDCP was activated since
1924 if (old_cp == new_cp ||
1925 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
1926 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
1929 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
1931 crtc_state->mode_changed = true;
1934 /* Handles the CP_IRQ raised from the DP HDCP sink */
/*
 * Bump the CP_IRQ counter, wake any waiter blocked on cp_irq_queue, and
 * kick an immediate link check.
 */
1935 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
1937 struct intel_hdcp *hdcp = &connector->hdcp;
1942 atomic_inc(&connector->hdcp.cp_irq_count);
1943 wake_up_all(&connector->hdcp.cp_irq_queue);
1945 schedule_delayed_work(&hdcp->check_work, 0);