/* SPDX-License-Identifier: MIT */
/*
 * Copyright (C) 2017 Google, Inc.
 * Copyright © 2017-2019, Intel Corporation.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 * Ramalingam C <ramalingam.c@intel.com>
 */
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
20 #include "intel_display_power.h"
21 #include "intel_display_types.h"
22 #include "intel_hdcp.h"
23 #include "intel_sideband.h"
24 #include "intel_connector.h"
26 #define KEY_LOAD_TRIES 5
27 #define HDCP2_LC_RETRY_CNT 3
29 static int intel_conn_to_vcpi(struct intel_connector *connector)
31 /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
32 return connector->port ? connector->port->vcpi.vcpi : 0;
36 intel_streams_type1_capable(struct intel_connector *connector)
38 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
44 if (shim->streams_type1_capable)
45 shim->streams_type1_capable(connector, &capable);
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
 * content_type for all streams in the DP MST topology, because the security
 * f/w doesn't have any provision to mark the content_type of each stream
 * separately; it marks all available streams with the content_type provided
 * at the time of port authentication. This may prohibit userspace from using
 * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
 * topology are not HDCP 2.2 capable. Though it is not compulsory, the
 * security f/w should change its policy to mark different content_types for
 * different streams.
 */
61 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
63 struct drm_connector_list_iter conn_iter;
64 struct intel_digital_port *conn_dig_port;
65 struct intel_connector *connector;
66 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
67 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
68 bool enforce_type0 = false;
73 if (dig_port->hdcp_auth_status)
76 drm_connector_list_iter_begin(&i915->drm, &conn_iter);
77 for_each_intel_connector_iter(connector, &conn_iter) {
78 if (connector->base.status == connector_status_disconnected)
81 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
84 conn_dig_port = intel_attached_dig_port(connector);
85 if (conn_dig_port != dig_port)
88 if (!enforce_type0 && !intel_streams_type1_capable(connector))
91 data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
94 /* if there is only one active stream */
95 if (dig_port->dp.active_mst_links <= 1)
98 drm_connector_list_iter_end(&conn_iter);
100 if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
104 * Apply common protection level across all streams in DP MST Topology.
105 * Use highest supported content type for all streams in DP MST Topology.
107 for (k = 0; k < data->k; k++)
108 data->streams[k].stream_type =
109 enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
115 bool intel_hdcp_is_ksv_valid(u8 *ksv)
118 /* KSV has 20 1's and 20 0's */
119 for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
120 ones += hweight8(ksv[i]);
128 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
129 const struct intel_hdcp_shim *shim, u8 *bksv)
131 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
132 int ret, i, tries = 2;
134 /* HDCP spec states that we must retry the bksv if it is invalid */
135 for (i = 0; i < tries; i++) {
136 ret = shim->read_bksv(dig_port, bksv);
139 if (intel_hdcp_is_ksv_valid(bksv))
143 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
150 /* Is HDCP1.4 capable on Platform and Sink */
151 bool intel_hdcp_capable(struct intel_connector *connector)
153 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
154 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
155 bool capable = false;
161 if (shim->hdcp_capable) {
162 shim->hdcp_capable(dig_port, &capable);
164 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
171 /* Is HDCP2.2 capable on Platform and Sink */
172 bool intel_hdcp2_capable(struct intel_connector *connector)
174 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
175 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
176 struct intel_hdcp *hdcp = &connector->hdcp;
177 bool capable = false;
179 /* I915 support for HDCP2.2 */
180 if (!hdcp->hdcp2_supported)
183 /* MEI interface is solid */
184 mutex_lock(&dev_priv->hdcp_comp_mutex);
185 if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
186 mutex_unlock(&dev_priv->hdcp_comp_mutex);
189 mutex_unlock(&dev_priv->hdcp_comp_mutex);
191 /* Sink's capability for HDCP2.2 */
192 hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
197 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
198 enum transcoder cpu_transcoder, enum port port)
200 return intel_de_read(dev_priv,
201 HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
205 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
206 enum transcoder cpu_transcoder, enum port port)
208 return intel_de_read(dev_priv,
209 HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
210 LINK_ENCRYPTION_STATUS;
213 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
214 const struct intel_hdcp_shim *shim)
219 /* Poll for ksv list ready (spec says max time allowed is 5s) */
220 ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
222 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
234 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
236 enum i915_power_well_id id;
237 intel_wakeref_t wakeref;
238 bool enabled = false;
241 * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
242 * On all BXT+, SW can load the keys only when the PW#1 is turned on.
244 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
245 id = HSW_DISP_PW_GLOBAL;
249 /* PG1 (power well #1) needs to be enabled */
250 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
251 enabled = intel_display_power_well_is_enabled(dev_priv, id);
254 * Another req for hdcp key loadability is enabled state of pll for
255 * cdclk. Without active crtc we wont land here. So we are assuming that
256 * cdclk is already on.
262 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
264 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
265 intel_de_write(dev_priv, HDCP_KEY_STATUS,
266 HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
269 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
274 val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
275 if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
279 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
280 * out of reset. So if Key is not already loaded, its an error state.
282 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
283 if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
287 * Initiate loading the HDCP key from fuses.
289 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
290 * platforms except BXT and GLK, differ in the key load trigger process
291 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
293 if (IS_GEN9_BC(dev_priv)) {
294 ret = sandybridge_pcode_write(dev_priv,
295 SKL_PCODE_LOAD_HDCP_KEYS, 1);
297 drm_err(&dev_priv->drm,
298 "Failed to initiate HDCP key load (%d)\n",
303 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
306 /* Wait for the keys to load (500us) */
307 ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
308 HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
312 else if (!(val & HDCP_KEY_LOAD_STATUS))
315 /* Send Aksv over to PCH display for use in authentication */
316 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
321 /* Returns updated SHA-1 index */
322 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
324 intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
325 if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
326 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
333 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
334 enum transcoder cpu_transcoder, enum port port)
336 if (DISPLAY_VER(dev_priv) >= 12) {
337 switch (cpu_transcoder) {
339 return HDCP_TRANSA_REP_PRESENT |
342 return HDCP_TRANSB_REP_PRESENT |
345 return HDCP_TRANSC_REP_PRESENT |
348 return HDCP_TRANSD_REP_PRESENT |
351 drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
359 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
361 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
363 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
365 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
367 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
369 drm_err(&dev_priv->drm, "Unknown port %d\n", port);
375 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
376 const struct intel_hdcp_shim *shim,
377 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
379 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
380 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
381 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
382 enum port port = dig_port->base.port;
383 u32 vprime, sha_text, sha_leftovers, rep_ctl;
384 int ret, i, j, sha_idx;
386 /* Process V' values from the receiver */
387 for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
388 ret = shim->read_v_prime_part(dig_port, i, &vprime);
391 intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
395 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
396 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
397 * stream is written via the HDCP_SHA_TEXT register in 32-bit
398 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
399 * index will keep track of our progress through the 64 bytes as well as
400 * helping us work the 40-bit KSVs through our 32-bit register.
402 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
407 rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
408 intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
409 for (i = 0; i < num_downstream; i++) {
410 unsigned int sha_empty;
411 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
413 /* Fill up the empty slots in sha_text and write it out */
414 sha_empty = sizeof(sha_text) - sha_leftovers;
415 for (j = 0; j < sha_empty; j++) {
416 u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
417 sha_text |= ksv[j] << off;
420 ret = intel_write_sha_text(dev_priv, sha_text);
424 /* Programming guide writes this every 64 bytes */
425 sha_idx += sizeof(sha_text);
427 intel_de_write(dev_priv, HDCP_REP_CTL,
428 rep_ctl | HDCP_SHA1_TEXT_32);
430 /* Store the leftover bytes from the ksv in sha_text */
431 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
433 for (j = 0; j < sha_leftovers; j++)
434 sha_text |= ksv[sha_empty + j] <<
435 ((sizeof(sha_text) - j - 1) * 8);
438 * If we still have room in sha_text for more data, continue.
439 * Otherwise, write it out immediately.
441 if (sizeof(sha_text) > sha_leftovers)
444 ret = intel_write_sha_text(dev_priv, sha_text);
449 sha_idx += sizeof(sha_text);
453 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
454 * bytes are leftover from the last ksv, we might be able to fit them
455 * all in sha_text (first 2 cases), or we might need to split them up
456 * into 2 writes (last 2 cases).
458 if (sha_leftovers == 0) {
459 /* Write 16 bits of text, 16 bits of M0 */
460 intel_de_write(dev_priv, HDCP_REP_CTL,
461 rep_ctl | HDCP_SHA1_TEXT_16);
462 ret = intel_write_sha_text(dev_priv,
463 bstatus[0] << 8 | bstatus[1]);
466 sha_idx += sizeof(sha_text);
468 /* Write 32 bits of M0 */
469 intel_de_write(dev_priv, HDCP_REP_CTL,
470 rep_ctl | HDCP_SHA1_TEXT_0);
471 ret = intel_write_sha_text(dev_priv, 0);
474 sha_idx += sizeof(sha_text);
476 /* Write 16 bits of M0 */
477 intel_de_write(dev_priv, HDCP_REP_CTL,
478 rep_ctl | HDCP_SHA1_TEXT_16);
479 ret = intel_write_sha_text(dev_priv, 0);
482 sha_idx += sizeof(sha_text);
484 } else if (sha_leftovers == 1) {
485 /* Write 24 bits of text, 8 bits of M0 */
486 intel_de_write(dev_priv, HDCP_REP_CTL,
487 rep_ctl | HDCP_SHA1_TEXT_24);
488 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
489 /* Only 24-bits of data, must be in the LSB */
490 sha_text = (sha_text & 0xffffff00) >> 8;
491 ret = intel_write_sha_text(dev_priv, sha_text);
494 sha_idx += sizeof(sha_text);
496 /* Write 32 bits of M0 */
497 intel_de_write(dev_priv, HDCP_REP_CTL,
498 rep_ctl | HDCP_SHA1_TEXT_0);
499 ret = intel_write_sha_text(dev_priv, 0);
502 sha_idx += sizeof(sha_text);
504 /* Write 24 bits of M0 */
505 intel_de_write(dev_priv, HDCP_REP_CTL,
506 rep_ctl | HDCP_SHA1_TEXT_8);
507 ret = intel_write_sha_text(dev_priv, 0);
510 sha_idx += sizeof(sha_text);
512 } else if (sha_leftovers == 2) {
513 /* Write 32 bits of text */
514 intel_de_write(dev_priv, HDCP_REP_CTL,
515 rep_ctl | HDCP_SHA1_TEXT_32);
516 sha_text |= bstatus[0] << 8 | bstatus[1];
517 ret = intel_write_sha_text(dev_priv, sha_text);
520 sha_idx += sizeof(sha_text);
522 /* Write 64 bits of M0 */
523 intel_de_write(dev_priv, HDCP_REP_CTL,
524 rep_ctl | HDCP_SHA1_TEXT_0);
525 for (i = 0; i < 2; i++) {
526 ret = intel_write_sha_text(dev_priv, 0);
529 sha_idx += sizeof(sha_text);
533 * Terminate the SHA-1 stream by hand. For the other leftover
534 * cases this is appended by the hardware.
536 intel_de_write(dev_priv, HDCP_REP_CTL,
537 rep_ctl | HDCP_SHA1_TEXT_32);
538 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
539 ret = intel_write_sha_text(dev_priv, sha_text);
542 sha_idx += sizeof(sha_text);
543 } else if (sha_leftovers == 3) {
544 /* Write 32 bits of text (filled from LSB) */
545 intel_de_write(dev_priv, HDCP_REP_CTL,
546 rep_ctl | HDCP_SHA1_TEXT_32);
547 sha_text |= bstatus[0];
548 ret = intel_write_sha_text(dev_priv, sha_text);
551 sha_idx += sizeof(sha_text);
553 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
554 intel_de_write(dev_priv, HDCP_REP_CTL,
555 rep_ctl | HDCP_SHA1_TEXT_8);
556 ret = intel_write_sha_text(dev_priv, bstatus[1]);
559 sha_idx += sizeof(sha_text);
561 /* Write 32 bits of M0 */
562 intel_de_write(dev_priv, HDCP_REP_CTL,
563 rep_ctl | HDCP_SHA1_TEXT_0);
564 ret = intel_write_sha_text(dev_priv, 0);
567 sha_idx += sizeof(sha_text);
569 /* Write 8 bits of M0 */
570 intel_de_write(dev_priv, HDCP_REP_CTL,
571 rep_ctl | HDCP_SHA1_TEXT_24);
572 ret = intel_write_sha_text(dev_priv, 0);
575 sha_idx += sizeof(sha_text);
577 drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
582 intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
583 /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
584 while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
585 ret = intel_write_sha_text(dev_priv, 0);
588 sha_idx += sizeof(sha_text);
592 * Last write gets the length of the concatenation in bits. That is:
593 * - 5 bytes per device
594 * - 10 bytes for BINFO/BSTATUS(2), M0(8)
596 sha_text = (num_downstream * 5 + 10) * 8;
597 ret = intel_write_sha_text(dev_priv, sha_text);
601 /* Tell the HW we're done with the hash and wait for it to ACK */
602 intel_de_write(dev_priv, HDCP_REP_CTL,
603 rep_ctl | HDCP_SHA1_COMPLETE_HASH);
604 if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
605 HDCP_SHA1_COMPLETE, 1)) {
606 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
609 if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
610 drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
617 /* Implements Part 2 of the HDCP authorization procedure */
619 int intel_hdcp_auth_downstream(struct intel_connector *connector)
621 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
622 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
623 const struct intel_hdcp_shim *shim = connector->hdcp.shim;
624 u8 bstatus[2], num_downstream, *ksv_fifo;
625 int ret, i, tries = 3;
627 ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
629 drm_dbg_kms(&dev_priv->drm,
630 "KSV list failed to become ready (%d)\n", ret);
634 ret = shim->read_bstatus(dig_port, bstatus);
638 if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
639 DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
640 drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
645 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
646 * the HDCP encryption. That implies that repeater can't have its own
647 * display. As there is no consumption of encrypted content in the
648 * repeater with 0 downstream devices, we are failing the
651 num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
652 if (num_downstream == 0) {
653 drm_dbg_kms(&dev_priv->drm,
654 "Repeater with zero downstream devices\n");
658 ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
660 drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
664 ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
668 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
669 num_downstream) > 0) {
670 drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
676 * When V prime mismatches, DP Spec mandates re-read of
677 * V prime atleast twice.
679 for (i = 0; i < tries; i++) {
680 ret = intel_hdcp_validate_v_prime(connector, shim,
681 ksv_fifo, num_downstream,
688 drm_dbg_kms(&dev_priv->drm,
689 "V Prime validation failed.(%d)\n", ret);
693 drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
701 /* Implements Part 1 of the HDCP authorization procedure */
702 static int intel_hdcp_auth(struct intel_connector *connector)
704 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
705 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
706 struct intel_hdcp *hdcp = &connector->hdcp;
707 const struct intel_hdcp_shim *shim = hdcp->shim;
708 enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
709 enum port port = dig_port->base.port;
710 unsigned long r0_prime_gen_start;
711 int ret, i, tries = 2;
714 u8 shim[DRM_HDCP_AN_LEN];
718 u8 shim[DRM_HDCP_KSV_LEN];
722 u8 shim[DRM_HDCP_RI_LEN];
724 bool repeater_present, hdcp_capable;
727 * Detects whether the display is HDCP capable. Although we check for
728 * valid Bksv below, the HDCP over DP spec requires that we check
729 * whether the display supports HDCP before we write An. For HDMI
730 * displays, this is not necessary.
732 if (shim->hdcp_capable) {
733 ret = shim->hdcp_capable(dig_port, &hdcp_capable);
737 drm_dbg_kms(&dev_priv->drm,
738 "Panel is not HDCP capable\n");
743 /* Initialize An with 2 random values and acquire it */
744 for (i = 0; i < 2; i++)
745 intel_de_write(dev_priv,
746 HDCP_ANINIT(dev_priv, cpu_transcoder, port),
748 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
749 HDCP_CONF_CAPTURE_AN);
751 /* Wait for An to be acquired */
752 if (intel_de_wait_for_set(dev_priv,
753 HDCP_STATUS(dev_priv, cpu_transcoder, port),
754 HDCP_STATUS_AN_READY, 1)) {
755 drm_err(&dev_priv->drm, "Timed out waiting for An\n");
759 an.reg[0] = intel_de_read(dev_priv,
760 HDCP_ANLO(dev_priv, cpu_transcoder, port));
761 an.reg[1] = intel_de_read(dev_priv,
762 HDCP_ANHI(dev_priv, cpu_transcoder, port));
763 ret = shim->write_an_aksv(dig_port, an.shim);
767 r0_prime_gen_start = jiffies;
769 memset(&bksv, 0, sizeof(bksv));
771 ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
775 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
776 drm_err(&dev_priv->drm, "BKSV is revoked\n");
780 intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
782 intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
785 ret = shim->repeater_present(dig_port, &repeater_present);
788 if (repeater_present)
789 intel_de_write(dev_priv, HDCP_REP_CTL,
790 intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
792 ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
796 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
797 HDCP_CONF_AUTH_AND_ENC);
799 /* Wait for R0 ready */
800 if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
801 (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
802 drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
807 * Wait for R0' to become available. The spec says 100ms from Aksv, but
808 * some monitors can take longer than this. We'll set the timeout at
809 * 300ms just to be sure.
811 * On DP, there's an R0_READY bit available but no such bit
812 * exists on HDMI. Since the upper-bound is the same, we'll just do
813 * the stupid thing instead of polling on one and not the other.
815 wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
820 * DP HDCP Spec mandates the two more reattempt to read R0, incase
823 for (i = 0; i < tries; i++) {
825 ret = shim->read_ri_prime(dig_port, ri.shim);
828 intel_de_write(dev_priv,
829 HDCP_RPRIME(dev_priv, cpu_transcoder, port),
832 /* Wait for Ri prime match */
833 if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
834 (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
839 drm_dbg_kms(&dev_priv->drm,
840 "Timed out waiting for Ri prime match (%x)\n",
841 intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
842 cpu_transcoder, port)));
846 /* Wait for encryption confirmation */
847 if (intel_de_wait_for_set(dev_priv,
848 HDCP_STATUS(dev_priv, cpu_transcoder, port),
850 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
851 drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
855 /* DP MST Auth Part 1 Step 2.a and Step 2.b */
856 if (shim->stream_encryption) {
857 ret = shim->stream_encryption(connector, true);
859 drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
860 connector->base.name, connector->base.base.id);
863 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
864 transcoder_name(hdcp->stream_transcoder));
867 if (repeater_present)
868 return intel_hdcp_auth_downstream(connector);
870 drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
874 static int _intel_hdcp_disable(struct intel_connector *connector)
876 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
877 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
878 struct intel_hdcp *hdcp = &connector->hdcp;
879 enum port port = dig_port->base.port;
880 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
884 drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
885 connector->base.name, connector->base.base.id);
887 if (hdcp->shim->stream_encryption) {
888 ret = hdcp->shim->stream_encryption(connector, false);
890 drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
891 connector->base.name, connector->base.base.id);
894 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
895 transcoder_name(hdcp->stream_transcoder));
897 * If there are other connectors on this port using HDCP,
898 * don't disable it until it disabled HDCP encryption for
899 * all connectors in MST topology.
901 if (dig_port->num_hdcp_streams > 0)
905 hdcp->hdcp_encrypted = false;
906 intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
907 if (intel_de_wait_for_clear(dev_priv,
908 HDCP_STATUS(dev_priv, cpu_transcoder, port),
909 ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
910 drm_err(&dev_priv->drm,
911 "Failed to disable HDCP, timeout clearing status\n");
915 repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
917 intel_de_write(dev_priv, HDCP_REP_CTL,
918 intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
920 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
922 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
926 drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
930 static int _intel_hdcp_enable(struct intel_connector *connector)
932 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
933 struct intel_hdcp *hdcp = &connector->hdcp;
934 int i, ret, tries = 3;
936 drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
937 connector->base.name, connector->base.base.id);
939 if (!hdcp_key_loadable(dev_priv)) {
940 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
944 for (i = 0; i < KEY_LOAD_TRIES; i++) {
945 ret = intel_hdcp_load_keys(dev_priv);
948 intel_hdcp_clear_keys(dev_priv);
951 drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
956 /* Incase of authentication failures, HDCP spec expects reauth. */
957 for (i = 0; i < tries; i++) {
958 ret = intel_hdcp_auth(connector);
960 hdcp->hdcp_encrypted = true;
964 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
966 /* Ensuring HDCP encryption and signalling are stopped. */
967 _intel_hdcp_disable(connector);
970 drm_dbg_kms(&dev_priv->drm,
971 "HDCP authentication failed (%d tries/%d)\n", tries, ret);
975 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
977 return container_of(hdcp, struct intel_connector, hdcp);
980 static void intel_hdcp_update_value(struct intel_connector *connector,
981 u64 value, bool update_property)
983 struct drm_device *dev = connector->base.dev;
984 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
985 struct intel_hdcp *hdcp = &connector->hdcp;
987 drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
989 if (hdcp->value == value)
992 drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
994 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
995 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
996 dig_port->num_hdcp_streams--;
997 } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
998 dig_port->num_hdcp_streams++;
1001 hdcp->value = value;
1002 if (update_property) {
1003 drm_connector_get(&connector->base);
1004 schedule_work(&hdcp->prop_work);
1008 /* Implements Part 3 of the HDCP authorization procedure */
1009 static int intel_hdcp_check_link(struct intel_connector *connector)
1011 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1012 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1013 struct intel_hdcp *hdcp = &connector->hdcp;
1014 enum port port = dig_port->base.port;
1015 enum transcoder cpu_transcoder;
1018 mutex_lock(&hdcp->mutex);
1019 mutex_lock(&dig_port->hdcp_mutex);
1021 cpu_transcoder = hdcp->cpu_transcoder;
1023 /* Check_link valid only when HDCP1.4 is enabled */
1024 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1025 !hdcp->hdcp_encrypted) {
1030 if (drm_WARN_ON(&dev_priv->drm,
1031 !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
1032 drm_err(&dev_priv->drm,
1033 "%s:%d HDCP link stopped encryption,%x\n",
1034 connector->base.name, connector->base.base.id,
1035 intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
1037 intel_hdcp_update_value(connector,
1038 DRM_MODE_CONTENT_PROTECTION_DESIRED,
1043 if (hdcp->shim->check_link(dig_port, connector)) {
1044 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1045 intel_hdcp_update_value(connector,
1046 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1051 drm_dbg_kms(&dev_priv->drm,
1052 "[%s:%d] HDCP link failed, retrying authentication\n",
1053 connector->base.name, connector->base.base.id);
1055 ret = _intel_hdcp_disable(connector);
1057 drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
1058 intel_hdcp_update_value(connector,
1059 DRM_MODE_CONTENT_PROTECTION_DESIRED,
1064 ret = _intel_hdcp_enable(connector);
1066 drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
1067 intel_hdcp_update_value(connector,
1068 DRM_MODE_CONTENT_PROTECTION_DESIRED,
1074 mutex_unlock(&dig_port->hdcp_mutex);
1075 mutex_unlock(&hdcp->mutex);
1079 static void intel_hdcp_prop_work(struct work_struct *work)
1081 struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1083 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1084 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1086 drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
1087 mutex_lock(&hdcp->mutex);
1090 * This worker is only used to flip between ENABLED/DESIRED. Either of
1091 * those to UNDESIRED is handled by core. If value == UNDESIRED,
1092 * we're running just after hdcp has been disabled, so just exit
1094 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1095 drm_hdcp_update_content_protection(&connector->base,
1098 mutex_unlock(&hdcp->mutex);
1099 drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
1101 drm_connector_put(&connector->base);
1104 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1106 return INTEL_INFO(dev_priv)->display.has_hdcp &&
1107 (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1111 hdcp2_prepare_ake_init(struct intel_connector *connector,
1112 struct hdcp2_ake_init *ake_data)
1114 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1115 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1116 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1117 struct i915_hdcp_comp_master *comp;
1120 mutex_lock(&dev_priv->hdcp_comp_mutex);
1121 comp = dev_priv->hdcp_master;
1123 if (!comp || !comp->ops) {
1124 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1128 ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1130 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1132 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1138 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1139 struct hdcp2_ake_send_cert *rx_cert,
1141 struct hdcp2_ake_no_stored_km *ek_pub_km,
1144 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1145 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1146 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1147 struct i915_hdcp_comp_master *comp;
1150 mutex_lock(&dev_priv->hdcp_comp_mutex);
1151 comp = dev_priv->hdcp_master;
1153 if (!comp || !comp->ops) {
1154 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1158 ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1162 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1164 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1169 static int hdcp2_verify_hprime(struct intel_connector *connector,
1170 struct hdcp2_ake_send_hprime *rx_hprime)
1172 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1173 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1174 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1175 struct i915_hdcp_comp_master *comp;
1178 mutex_lock(&dev_priv->hdcp_comp_mutex);
1179 comp = dev_priv->hdcp_master;
1181 if (!comp || !comp->ops) {
1182 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1186 ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1188 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1189 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1195 hdcp2_store_pairing_info(struct intel_connector *connector,
1196 struct hdcp2_ake_send_pairing_info *pairing_info)
1198 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1199 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1200 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1201 struct i915_hdcp_comp_master *comp;
1204 mutex_lock(&dev_priv->hdcp_comp_mutex);
1205 comp = dev_priv->hdcp_master;
1207 if (!comp || !comp->ops) {
1208 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1212 ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1214 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1216 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Ask the security firmware to initiate the locality check; on success
 * @lc_init holds the LC_Init message to send to the receiver.
 * Serialized by dev_priv->hdcp_comp_mutex.
 */
1222 hdcp2_prepare_lc_init(struct intel_connector *connector,
1223 struct hdcp2_lc_init *lc_init)
1225 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1226 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1227 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1228 struct i915_hdcp_comp_master *comp;
1231 mutex_lock(&dev_priv->hdcp_comp_mutex);
1232 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: cannot talk to firmware */
1234 if (!comp || !comp->ops) {
1235 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1239 ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1241 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1243 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Hand the receiver's LC_Send_L_prime message to the security firmware
 * for L' verification. Serialized by dev_priv->hdcp_comp_mutex.
 */
1249 hdcp2_verify_lprime(struct intel_connector *connector,
1250 struct hdcp2_lc_send_lprime *rx_lprime)
1252 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1253 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1254 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1255 struct i915_hdcp_comp_master *comp;
1258 mutex_lock(&dev_priv->hdcp_comp_mutex);
1259 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: cannot talk to firmware */
1261 if (!comp || !comp->ops) {
1262 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1266 ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1268 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1270 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Request the session key material from the security firmware; on success
 * @ske_data holds the SKE_Send_Eks message to send to the receiver.
 * Serialized by dev_priv->hdcp_comp_mutex.
 */
1275 static int hdcp2_prepare_skey(struct intel_connector *connector,
1276 struct hdcp2_ske_send_eks *ske_data)
1278 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1279 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1280 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1281 struct i915_hdcp_comp_master *comp;
1284 mutex_lock(&dev_priv->hdcp_comp_mutex);
1285 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: cannot talk to firmware */
1287 if (!comp || !comp->ops) {
1288 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1292 ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1294 drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1296 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Hand the repeater's ReceiverID_List to the security firmware for
 * topology verification; on success @rep_send_ack holds the
 * RepeaterAuth_Send_Ack to return to the repeater.
 * Serialized by dev_priv->hdcp_comp_mutex.
 */
1302 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1303 struct hdcp2_rep_send_receiverid_list
1305 struct hdcp2_rep_send_ack *rep_send_ack)
1307 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1308 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1309 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1310 struct i915_hdcp_comp_master *comp;
1313 mutex_lock(&dev_priv->hdcp_comp_mutex);
1314 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: cannot talk to firmware */
1316 if (!comp || !comp->ops) {
1317 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1321 ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1325 drm_dbg_kms(&dev_priv->drm,
1326 "Verify rep topology failed. %d\n", ret);
1327 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Hand the repeater's RepeaterAuth_Stream_Ready message to the security
 * firmware for M' verification. Serialized by dev_priv->hdcp_comp_mutex.
 */
1333 hdcp2_verify_mprime(struct intel_connector *connector,
1334 struct hdcp2_rep_stream_ready *stream_ready)
1336 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1337 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1338 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1339 struct i915_hdcp_comp_master *comp;
1342 mutex_lock(&dev_priv->hdcp_comp_mutex);
1343 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: cannot talk to firmware */
1345 if (!comp || !comp->ops) {
1346 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1350 ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1352 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1353 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Tell the security firmware that the HDCP 2.2 authentication of this
 * port is complete (enable_hdcp_authentication op).
 * Serialized by dev_priv->hdcp_comp_mutex.
 */
1358 static int hdcp2_authenticate_port(struct intel_connector *connector)
1360 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1361 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1362 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1363 struct i915_hdcp_comp_master *comp;
1366 mutex_lock(&dev_priv->hdcp_comp_mutex);
1367 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: cannot talk to firmware */
1369 if (!comp || !comp->ops) {
1370 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1374 ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1376 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1378 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Close the firmware-side HDCP session for this port via the MEI
 * component. Serialized by dev_priv->hdcp_comp_mutex.
 */
1383 static int hdcp2_close_mei_session(struct intel_connector *connector)
1385 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1386 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1387 struct i915_hdcp_comp_master *comp;
1390 mutex_lock(&dev_priv->hdcp_comp_mutex);
1391 comp = dev_priv->hdcp_master;
/* MEI HDCP component not bound: nothing to close */
1393 if (!comp || !comp->ops) {
1394 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1398 ret = comp->ops->close_hdcp_session(comp->mei_dev,
1399 &dig_port->hdcp_port_data);
1400 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/* De-authenticating a port is just closing the firmware HDCP session. */
1405 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1407 return hdcp2_close_mei_session(connector);
1410 /* Authentication flow starts from here */
/*
 * HDCP 2.2 AKE (Authentication and Key Exchange) step:
 * AKE_Init -> AKE_Send_Cert -> (revocation + cert check, km prep) ->
 * No_Stored_km/Stored_km -> AKE_Send_H_prime -> optional pairing.
 * NOTE(review): msgs appears to be a local union of the AKE messages; its
 * declaration and several braces/returns are elided in this listing.
 */
1411 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1413 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1414 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1415 struct intel_hdcp *hdcp = &connector->hdcp;
1417 struct hdcp2_ake_init ake_init;
1418 struct hdcp2_ake_send_cert send_cert;
1419 struct hdcp2_ake_no_stored_km no_stored_km;
1420 struct hdcp2_ake_send_hprime send_hprime;
1421 struct hdcp2_ake_send_pairing_info pairing_info;
1423 const struct intel_hdcp_shim *shim = hdcp->shim;
1427 /* Init for seq_num */
1428 hdcp->seq_num_v = 0;
1429 hdcp->seq_num_m = 0;
1431 ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
/* Send AKE_Init to the receiver, then read back its certificate */
1435 ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
1436 sizeof(msgs.ake_init));
1440 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
1441 &msgs.send_cert, sizeof(msgs.send_cert));
/* The receiver must advertise HDCP 2.2 in rx_caps */
1445 if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1446 drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
1450 hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
/* Reject receivers whose ID is on the SRM revocation list */
1452 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1453 msgs.send_cert.cert_rx.receiver_id,
1455 drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
1460 * Here msgs.no_stored_km will hold msgs corresponding to the km
1463 ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1465 &msgs.no_stored_km, &size);
1469 ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
/* Read and verify the receiver's H' */
1473 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1474 &msgs.send_hprime, sizeof(msgs.send_hprime));
1478 ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1482 if (!hdcp->is_paired) {
1483 /* Pairing is required */
1484 ret = shim->read_2_2_msg(dig_port,
1485 HDCP_2_2_AKE_SEND_PAIRING_INFO,
1487 sizeof(msgs.pairing_info));
1491 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1494 hdcp->is_paired = true;
/*
 * HDCP 2.2 Locality Check: firmware prepares LC_Init, we send it to the
 * receiver, read back LC_Send_L_prime and let firmware verify L'.
 * Retried up to HDCP2_LC_RETRY_CNT times.
 */
1500 static int hdcp2_locality_check(struct intel_connector *connector)
1502 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1503 struct intel_hdcp *hdcp = &connector->hdcp;
1505 struct hdcp2_lc_init lc_init;
1506 struct hdcp2_lc_send_lprime send_lprime;
1508 const struct intel_hdcp_shim *shim = hdcp->shim;
1509 int tries = HDCP2_LC_RETRY_CNT, ret, i;
1511 for (i = 0; i < tries; i++) {
1512 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1516 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1517 sizeof(msgs.lc_init));
1521 ret = shim->read_2_2_msg(dig_port,
1522 HDCP_2_2_LC_SEND_LPRIME,
1524 sizeof(msgs.send_lprime));
1528 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
/*
 * HDCP 2.2 SKE: get session key material from firmware and send the
 * SKE_Send_Eks message to the receiver.
 */
1536 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1538 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1539 struct intel_hdcp *hdcp = &connector->hdcp;
1540 struct hdcp2_ske_send_eks send_eks;
1543 ret = hdcp2_prepare_skey(connector, &send_eks);
1547 ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
/*
 * One attempt at HDCP 2.2 stream management: build and send
 * RepeaterAuth_Stream_Manage for all data->k streams, read back
 * RepeaterAuth_Stream_Ready and let firmware verify M'.
 * Bails out when seq_num_m has rolled over (caller must re-authenticate).
 */
1556 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1558 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1559 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1560 struct intel_hdcp *hdcp = &connector->hdcp;
1562 struct hdcp2_rep_stream_manage stream_manage;
1563 struct hdcp2_rep_stream_ready stream_ready;
1565 const struct intel_hdcp_shim *shim = hdcp->shim;
1566 int ret, streams_size_delta, i;
/* seq_num_m exhausted: cannot send another Stream_Manage */
1568 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1571 /* Prepare RepeaterAuth_Stream_Manage msg */
1572 msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1573 drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1575 msgs.stream_manage.k = cpu_to_be16(data->k);
1577 for (i = 0; i < data->k; i++) {
1578 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1579 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
/* Trim the unused trailing stream slots from the wire message */
1582 streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1583 sizeof(struct hdcp2_streamid_type);
1584 /* Send it to Repeater */
1585 ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1586 sizeof(msgs.stream_manage) - streams_size_delta);
1590 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1591 &msgs.stream_ready, sizeof(msgs.stream_ready));
1595 data->seq_num_m = hdcp->seq_num_m;
1597 ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
/*
 * Repeater authentication: read the ReceiverID_List, sanity-check the
 * topology (cascade/device-count limits, seq_num_v monotonicity, SRM
 * revocation), have firmware verify it and prepare the ack, then send
 * RepeaterAuth_Send_Ack back to the repeater.
 */
1606 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1608 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1609 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1610 struct intel_hdcp *hdcp = &connector->hdcp;
1612 struct hdcp2_rep_send_receiverid_list recvid_list;
1613 struct hdcp2_rep_send_ack rep_ack;
1615 const struct intel_hdcp_shim *shim = hdcp->shim;
1616 u32 seq_num_v, device_cnt;
1620 ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1621 &msgs.recvid_list, sizeof(msgs.recvid_list));
1625 rx_info = msgs.recvid_list.rx_info;
/* Reject topologies exceeding the HDCP 2.2 cascade/device limits */
1627 if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1628 HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1629 drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
1633 /* Converting and Storing the seq_num_v to local variable as DWORD */
1635 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
/* First ReceiverID_List (not yet encrypted) must carry seq_num_v == 0 */
1637 if (!hdcp->hdcp2_encrypted && seq_num_v) {
1638 drm_dbg_kms(&dev_priv->drm,
1639 "Non zero Seq_num_v at first RecvId_List msg\n");
1643 if (seq_num_v < hdcp->seq_num_v) {
1644 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1645 drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
1649 device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1650 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
/* Reject if any downstream receiver ID appears in the SRM */
1651 if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1652 msgs.recvid_list.receiver_ids,
1654 drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
1658 ret = hdcp2_verify_rep_topology_prepare_ack(connector,
/* Topology accepted: remember seq_num_v and ack the repeater */
1664 hdcp->seq_num_v = seq_num_v;
1665 ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
1666 sizeof(msgs.rep_ack));
/*
 * Full HDCP 2.2 sink authentication sequence:
 * AKE -> Locality Check -> SKE -> optional stream-type config ->
 * repeater topology authentication (when the sink is a repeater).
 */
1673 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1675 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1676 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1677 struct intel_hdcp *hdcp = &connector->hdcp;
1678 const struct intel_hdcp_shim *shim = hdcp->shim;
1681 ret = hdcp2_authentication_key_exchange(connector);
1683 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1687 ret = hdcp2_locality_check(connector);
1689 drm_dbg_kms(&i915->drm,
1690 "Locality Check failed. Err : %d\n", ret);
1694 ret = hdcp2_session_key_exchange(connector);
1696 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
/* Optional shim hook to program the content stream type on the sink */
1700 if (shim->config_stream_type) {
1701 ret = shim->config_stream_type(dig_port,
1703 hdcp->content_type);
1708 if (hdcp->is_repeater) {
1709 ret = hdcp2_authenticate_repeater_topology(connector);
1711 drm_dbg_kms(&i915->drm,
1712 "Repeater Auth Failed. Err: %d\n", ret);
/*
 * Enable per-stream HDCP 2.2 encryption via the shim, after confirming
 * that link-level encryption is already active in HDCP2_STATUS. On
 * failure the port is de-authenticated and hdcp_auth_status cleared.
 */
1720 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1722 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1723 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1724 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1725 struct intel_hdcp *hdcp = &connector->hdcp;
1726 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1727 enum port port = dig_port->base.port;
/* Stream encryption requires the link itself to be encrypted first */
1730 if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1731 LINK_ENCRYPTION_STATUS)) {
1732 drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1733 connector->base.name, connector->base.base.id);
1738 if (hdcp->shim->stream_2_2_encryption) {
1739 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1741 drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1742 connector->base.name, connector->base.base.id);
1745 drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1746 transcoder_name(hdcp->stream_transcoder));
/* Error path: tear down the firmware session for this port */
1752 if (hdcp2_deauthenticate_port(connector) < 0)
1753 drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
1755 dig_port->hdcp_auth_status = false;
/*
 * Enable HDCP 2.2 link encryption: toggle HDCP signalling on (if the
 * shim provides the hook), then once HDCP2_STATUS reports the link as
 * authenticated, request encryption via HDCP2_CTL and wait for
 * LINK_ENCRYPTION_STATUS to assert.
 */
1761 static int hdcp2_enable_encryption(struct intel_connector *connector)
1763 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1764 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1765 struct intel_hdcp *hdcp = &connector->hdcp;
1766 enum port port = dig_port->base.port;
1767 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
/* Encryption must not already be running at this point */
1770 drm_WARN_ON(&dev_priv->drm,
1771 intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1772 LINK_ENCRYPTION_STATUS);
1773 if (hdcp->shim->toggle_signalling) {
1774 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1777 drm_err(&dev_priv->drm,
1778 "Failed to enable HDCP signalling. %d\n",
1784 if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1786 /* Link is Authenticated. Now set for Encryption */
1787 intel_de_write(dev_priv,
1788 HDCP2_CTL(dev_priv, cpu_transcoder, port),
1789 intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
/* Wait (bounded) for the hardware to report encryption active */
1792 ret = intel_de_wait_for_set(dev_priv,
1793 HDCP2_STATUS(dev_priv, cpu_transcoder,
1795 LINK_ENCRYPTION_STATUS,
1796 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1797 dig_port->hdcp_auth_status = true;
/*
 * Disable HDCP 2.2 link encryption: clear the encryption request in
 * HDCP2_CTL, wait for LINK_ENCRYPTION_STATUS to drop, then toggle HDCP
 * signalling off via the shim when available.
 */
1802 static int hdcp2_disable_encryption(struct intel_connector *connector)
1804 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1805 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1806 struct intel_hdcp *hdcp = &connector->hdcp;
1807 enum port port = dig_port->base.port;
1808 enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
/* Encryption is expected to be running when we get here */
1811 drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1812 LINK_ENCRYPTION_STATUS));
1814 intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
1815 intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
1817 ret = intel_de_wait_for_clear(dev_priv,
1818 HDCP2_STATUS(dev_priv, cpu_transcoder,
1820 LINK_ENCRYPTION_STATUS,
1821 HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
/* Timeout is logged but not treated as fatal; teardown continues */
1822 if (ret == -ETIMEDOUT)
1823 drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");
1825 if (hdcp->shim->toggle_signalling) {
1826 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1829 drm_err(&dev_priv->drm,
1830 "Failed to disable HDCP signalling. %d\n",
/*
 * Retry wrapper around _hdcp2_propagate_stream_management_info():
 * no-op for non-repeaters; otherwise up to 3 attempts, aborting early
 * when seq_num_m rolls over (full re-authentication is needed then).
 */
1840 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1842 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1843 int i, tries = 3, ret;
/* Stream management is only meaningful for repeaters */
1845 if (!connector->hdcp.is_repeater)
1848 for (i = 0; i < tries; i++) {
1849 ret = _hdcp2_propagate_stream_management_info(connector);
1853 /* Lets restart the auth incase of seq_num_m roll over */
1854 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1855 drm_dbg_kms(&i915->drm,
1856 "seq_num_m roll over.(%d)\n", ret);
1860 drm_dbg_kms(&i915->drm,
1861 "HDCP2 stream management %d of %d Failed.(%d)\n",
/*
 * Top-level HDCP 2.2 bring-up: up to 3 attempts at sink authentication +
 * stream management + firmware port authentication (de-authenticating on
 * each failure), then link encryption after the mandated delay, and
 * finally per-stream encryption.
 */
1868 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1870 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1871 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1872 int ret = 0, i, tries = 3;
/* Skip re-auth when the port is already authenticated (MST sharing) */
1874 for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1875 ret = hdcp2_authenticate_sink(connector);
1877 ret = hdcp2_propagate_stream_management_info(connector);
1879 drm_dbg_kms(&i915->drm,
1880 "Stream management failed.(%d)\n",
1885 ret = hdcp2_authenticate_port(connector);
1888 drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1892 /* Clearing the mei hdcp session */
1893 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1895 if (hdcp2_deauthenticate_port(connector) < 0)
1896 drm_dbg_kms(&i915->drm, "Port deauth failed.\n")
1899 if (!ret && !dig_port->hdcp_auth_status) {
1901 * Ensuring the required 200mSec min time interval between
1902 * Session Key Exchange and encryption.
1904 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1905 ret = hdcp2_enable_encryption(connector);
1907 drm_dbg_kms(&i915->drm,
1908 "Encryption Enable Failed.(%d)\n", ret);
1909 if (hdcp2_deauthenticate_port(connector) < 0)
1910 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1915 ret = hdcp2_enable_stream_encryption(connector);
/*
 * Enable HDCP 2.2 on this connector: record the stream(s) that need
 * encryption (single stream for non-MST; full topology scan for MST via
 * intel_hdcp_required_content_stream()), run the authenticate+encrypt
 * flow and mark hdcp2_encrypted on success.
 */
1920 static int _intel_hdcp2_enable(struct intel_connector *connector)
1922 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1923 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1924 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1925 struct intel_hdcp *hdcp = &connector->hdcp;
1928 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1929 connector->base.name, connector->base.base.id,
1930 hdcp->content_type);
1932 /* Stream which requires encryption */
1933 if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
1935 data->streams[0].stream_type = hdcp->content_type;
/* MST: derive the common stream content type for the whole topology */
1937 ret = intel_hdcp_required_content_stream(dig_port);
1942 ret = hdcp2_authenticate_and_encrypt(connector);
1944 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
1945 hdcp->content_type, ret);
1949 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1950 connector->base.name, connector->base.base.id,
1951 hdcp->content_type);
1953 hdcp->hdcp2_encrypted = true;
/*
 * Disable HDCP 2.2 on this connector: turn off per-stream encryption
 * first, keep the port-level session alive while other MST streams still
 * use it (unless @hdcp2_link_recovery forces a full teardown), otherwise
 * disable link encryption and de-authenticate the port.
 */
1958 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1960 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1961 struct drm_i915_private *i915 = to_i915(connector->base.dev);
1962 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1963 struct intel_hdcp *hdcp = &connector->hdcp;
1966 drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1967 connector->base.name, connector->base.base.id);
1969 if (hdcp->shim->stream_2_2_encryption) {
1970 ret = hdcp->shim->stream_2_2_encryption(connector, false);
1972 drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
1973 connector->base.name, connector->base.base.id);
1976 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
1977 transcoder_name(hdcp->stream_transcoder));
/* Other streams still rely on the port session: keep the link up */
1979 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
1983 ret = hdcp2_disable_encryption(connector);
1985 if (hdcp2_deauthenticate_port(connector) < 0)
1986 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1988 connector->hdcp.hdcp2_encrypted = false;
1989 dig_port->hdcp_auth_status = false;
1995 /* Implements the Link Integrity Check for HDCP2.2 */
/*
 * Periodic HDCP 2.2 link integrity check (runs under hdcp->mutex and the
 * port's hdcp_mutex). Exits early when HDCP 2.2 isn't currently enabled.
 * If the hardware dropped encryption, the state is forced back to
 * DESIRED. Otherwise the shim's check_2_2_link() result decides:
 * PROTECTED -> re-assert ENABLED; TOPOLOGY_CHANGE -> re-authenticate the
 * repeater topology only; anything else -> full disable + re-enable.
 */
1996 static int intel_hdcp2_check_link(struct intel_connector *connector)
1998 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1999 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2000 struct intel_hdcp *hdcp = &connector->hdcp;
2001 enum port port = dig_port->base.port;
2002 enum transcoder cpu_transcoder;
2005 mutex_lock(&hdcp->mutex);
2006 mutex_lock(&dig_port->hdcp_mutex);
2007 cpu_transcoder = hdcp->cpu_transcoder;
2009 /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2010 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2011 !hdcp->hdcp2_encrypted) {
/* Hardware no longer reports HDCP 2.2 in use: encryption was lost */
2016 if (drm_WARN_ON(&dev_priv->drm,
2017 !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
2018 drm_err(&dev_priv->drm,
2019 "HDCP2.2 link stopped the encryption, %x\n",
2020 intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
2022 _intel_hdcp2_disable(connector, true);
2023 intel_hdcp_update_value(connector,
2024 DRM_MODE_CONTENT_PROTECTION_DESIRED,
2029 ret = hdcp->shim->check_2_2_link(dig_port, connector);
2030 if (ret == HDCP_LINK_PROTECTED) {
2031 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2032 intel_hdcp_update_value(connector,
2033 DRM_MODE_CONTENT_PROTECTION_ENABLED,
/* Downstream topology changed: re-run repeater auth only */
2039 if (ret == HDCP_TOPOLOGY_CHANGE) {
2040 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2043 drm_dbg_kms(&dev_priv->drm,
2044 "HDCP2.2 Downstream topology change\n");
2045 ret = hdcp2_authenticate_repeater_topology(connector);
2047 intel_hdcp_update_value(connector,
2048 DRM_MODE_CONTENT_PROTECTION_ENABLED,
2052 drm_dbg_kms(&dev_priv->drm,
2053 "[%s:%d] Repeater topology auth failed.(%d)\n",
2054 connector->base.name, connector->base.base.id,
2057 drm_dbg_kms(&dev_priv->drm,
2058 "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2059 connector->base.name, connector->base.base.id);
/* Any other failure: tear down and retry a full enable */
2062 ret = _intel_hdcp2_disable(connector, true);
2064 drm_err(&dev_priv->drm,
2065 "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2066 connector->base.name, connector->base.base.id, ret);
2067 intel_hdcp_update_value(connector,
2068 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2072 ret = _intel_hdcp2_enable(connector);
2074 drm_dbg_kms(&dev_priv->drm,
2075 "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2076 connector->base.name, connector->base.base.id,
2078 intel_hdcp_update_value(connector,
2079 DRM_MODE_CONTENT_PROTECTION_DESIRED,
2085 mutex_unlock(&dig_port->hdcp_mutex);
2086 mutex_unlock(&hdcp->mutex);
/*
 * Delayed-work handler that polls link integrity: tries the HDCP 2.2
 * check first, falling back to the HDCP 1.4 check, and re-arms itself
 * with the protocol-appropriate period while the link stays healthy.
 * Does nothing once the connector is unregistered.
 */
2090 static void intel_hdcp_check_work(struct work_struct *work)
2092 struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2095 struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2097 if (drm_connector_is_unregistered(&connector->base))
2100 if (!intel_hdcp2_check_link(connector))
2101 schedule_delayed_work(&hdcp->check_work,
2102 DRM_HDCP2_CHECK_PERIOD_MS);
2103 else if (!intel_hdcp_check_link(connector))
2104 schedule_delayed_work(&hdcp->check_work,
2105 DRM_HDCP_CHECK_PERIOD_MS);
/*
 * Component-framework bind callback: publish the MEI-provided ops table
 * as dev_priv->hdcp_master and remember the mei device, under
 * hdcp_comp_mutex.
 */
2108 static int i915_hdcp_component_bind(struct device *i915_kdev,
2109 struct device *mei_kdev, void *data)
2111 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2113 drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2114 mutex_lock(&dev_priv->hdcp_comp_mutex);
2115 dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
2116 dev_priv->hdcp_master->mei_dev = mei_kdev;
2117 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Component-framework unbind callback: drop the hdcp_master reference so
 * later firmware calls see the component as unbound.
 */
2122 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2123 struct device *mei_kdev, void *data)
2125 struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2127 drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2128 mutex_lock(&dev_priv->hdcp_comp_mutex);
2129 dev_priv->hdcp_master = NULL;
2130 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/* Bind/unbind ops registered with the component framework for MEI HDCP. */
2133 static const struct component_ops i915_hdcp_component_ops = {
2134 .bind = i915_hdcp_component_bind,
2135 .unbind = i915_hdcp_component_unbind,
/*
 * Map a DDI port to the ME firmware's DDI index; ports B..F map
 * directly, everything else is reported as invalid.
 */
2138 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2143 case PORT_B ... PORT_F:
2144 return (enum mei_fw_ddi)port;
2146 return MEI_DDI_INVALID_PORT;
/*
 * Map a CPU transcoder to the ME firmware's transcoder encoding
 * (transcoders A..D OR'ed with 0x10); other transcoders are invalid.
 */
2150 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2152 switch (cpu_transcoder) {
2153 case TRANSCODER_A ... TRANSCODER_D:
2154 return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2155 default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2156 return MEI_INVALID_TRANSCODER;
/*
 * One-time init of the per-port hdcp_port_data handed to the ME
 * firmware: fw_ddi/fw_tc indices, port type, shim protocol, and a
 * kcalloc'ed streams array sized for every pipe, with stream 0
 * pre-seeded from the connector's content type.
 */
2160 static int initialize_hdcp_port_data(struct intel_connector *connector,
2161 struct intel_digital_port *dig_port,
2162 const struct intel_hdcp_shim *shim)
2164 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2165 struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2166 struct intel_hdcp *hdcp = &connector->hdcp;
2167 enum port port = dig_port->base.port;
2169 if (DISPLAY_VER(dev_priv) < 12)
2170 data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2173 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
2174 * with zero(INVALID PORT index).
2176 data->fw_ddi = MEI_DDI_INVALID_PORT;
2179 * As associated transcoder is set and modified at modeset, here fw_tc
2180 * is initialized to zero (invalid transcoder index). This will be
2181 * retained for <Gen12 forever.
2183 data->fw_tc = MEI_INVALID_TRANSCODER;
2185 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2186 data->protocol = (u8)shim->protocol;
/* One stream slot per pipe covers the worst-case MST fan-out */
2189 data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2190 sizeof(struct hdcp2_streamid_type),
2192 if (!data->streams) {
2193 drm_err(&dev_priv->drm, "Out of Memory\n");
2197 data->streams[0].stream_id = 0;
2198 data->streams[0].stream_type = hdcp->content_type;
/*
 * HDCP 2.2 needs the MEI HDCP driver built in and a display generation
 * that supports it (display ver >= 10, or KBL/CFL/CML).
 */
2203 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2205 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2208 return (DISPLAY_VER(dev_priv) >= 10 ||
2209 IS_KABYLAKE(dev_priv) ||
2210 IS_COFFEELAKE(dev_priv) ||
2211 IS_COMETLAKE(dev_priv));
/*
 * Register the i915 side of the MEI HDCP component (typed component,
 * I915_COMPONENT_HDCP) when HDCP 2.2 is supported. hdcp_comp_added is
 * rolled back under hdcp_comp_mutex if component_add_typed() fails.
 */
2214 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2218 if (!is_hdcp2_supported(dev_priv))
2221 mutex_lock(&dev_priv->hdcp_comp_mutex);
2222 drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2224 dev_priv->hdcp_comp_added = true;
2225 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2226 ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2227 I915_COMPONENT_HDCP);
2229 drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2231 mutex_lock(&dev_priv->hdcp_comp_mutex);
2232 dev_priv->hdcp_comp_added = false;
2233 mutex_unlock(&dev_priv->hdcp_comp_mutex);
/*
 * Per-connector HDCP 2.2 setup: initialize the firmware port data and,
 * on success, flag the connector as HDCP 2.2 capable.
 */
2238 static void intel_hdcp2_init(struct intel_connector *connector,
2239 struct intel_digital_port *dig_port,
2240 const struct intel_hdcp_shim *shim)
2242 struct drm_i915_private *i915 = to_i915(connector->base.dev);
2243 struct intel_hdcp *hdcp = &connector->hdcp;
2246 ret = initialize_hdcp_port_data(connector, dig_port, shim);
2248 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2252 hdcp->hdcp2_supported = true;
/*
 * Per-connector HDCP init: optionally set up HDCP 2.2, attach the
 * content-protection property (undoing 2.2 state and freeing the streams
 * array if attaching fails), and initialize the lock, workers and CP-IRQ
 * wait queue used by the HDCP state machine.
 */
2255 int intel_hdcp_init(struct intel_connector *connector,
2256 struct intel_digital_port *dig_port,
2257 const struct intel_hdcp_shim *shim)
2259 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2260 struct intel_hdcp *hdcp = &connector->hdcp;
2266 if (is_hdcp2_supported(dev_priv))
2267 intel_hdcp2_init(connector, dig_port, shim);
2270 drm_connector_attach_content_protection_property(&connector->base,
2271 hdcp->hdcp2_supported);
/* Property attach failed: roll back the HDCP 2.2 setup */
2273 hdcp->hdcp2_supported = false;
2274 kfree(dig_port->hdcp_port_data.streams);
2279 mutex_init(&hdcp->mutex);
2280 INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2281 INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2282 init_waitqueue_head(&hdcp->cp_irq_queue);
/*
 * Public entry point to enable content protection on a connector.
 * Records the content type and the cpu/stream transcoders from the crtc
 * state (MST uses the MST master transcoder as the port transcoder),
 * programs fw_tc for display ver >= 12, prefers HDCP 2.2 when the setup
 * is capable and falls back to HDCP 1.4 for non-Type1 content, then arms
 * the periodic link-check work and publishes ENABLED.
 * Runs under hdcp->mutex and the port's hdcp_mutex.
 */
2287 int intel_hdcp_enable(struct intel_connector *connector,
2288 const struct intel_crtc_state *pipe_config, u8 content_type)
2290 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2291 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2292 struct intel_hdcp *hdcp = &connector->hdcp;
2293 unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2299 if (!connector->encoder) {
2300 drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
2301 connector->base.name, connector->base.base.id);
2305 mutex_lock(&hdcp->mutex);
2306 mutex_lock(&dig_port->hdcp_mutex);
2307 drm_WARN_ON(&dev_priv->drm,
2308 hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2309 hdcp->content_type = content_type;
2311 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2312 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2313 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2315 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2316 hdcp->stream_transcoder = INVALID_TRANSCODER;
2319 if (DISPLAY_VER(dev_priv) >= 12)
2320 dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
2323 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
2324 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
2326 if (intel_hdcp2_capable(connector)) {
2327 ret = _intel_hdcp2_enable(connector);
2329 check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
2333 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2336 if (ret && intel_hdcp_capable(connector) &&
2337 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2338 ret = _intel_hdcp_enable(connector);
/* Success: start periodic link checks and report ENABLED */
2342 schedule_delayed_work(&hdcp->check_work, check_link_interval);
2343 intel_hdcp_update_value(connector,
2344 DRM_MODE_CONTENT_PROTECTION_ENABLED,
2348 mutex_unlock(&dig_port->hdcp_mutex);
2349 mutex_unlock(&hdcp->mutex);
/*
 * Public entry point to disable content protection: publish UNDESIRED,
 * tear down whichever protocol is currently encrypting (2.2 or 1.4),
 * and cancel the periodic link-check work after dropping the locks.
 */
2353 int intel_hdcp_disable(struct intel_connector *connector)
2355 struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2356 struct intel_hdcp *hdcp = &connector->hdcp;
2362 mutex_lock(&hdcp->mutex);
2363 mutex_lock(&dig_port->hdcp_mutex);
/* Nothing to do if protection was never requested */
2365 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2368 intel_hdcp_update_value(connector,
2369 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2370 if (hdcp->hdcp2_encrypted)
2371 ret = _intel_hdcp2_disable(connector, false);
2372 else if (hdcp->hdcp_encrypted)
2373 ret = _intel_hdcp_disable(connector);
2376 mutex_unlock(&dig_port->hdcp_mutex);
2377 mutex_unlock(&hdcp->mutex);
2378 cancel_delayed_work_sync(&hdcp->check_work);
/*
 * Atomic-commit hook reconciling the connector's content-protection
 * uapi state with the driver state: disables HDCP on UNDESIRED or on a
 * content-type change (then re-marks DESIRED and notifies userspace via
 * prop_work), and (re-)enables HDCP when DESIRED and not already
 * ENABLED. No-op for connectors without an HDCP shim.
 */
2382 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2383 struct intel_encoder *encoder,
2384 const struct intel_crtc_state *crtc_state,
2385 const struct drm_connector_state *conn_state)
2387 struct intel_connector *connector =
2388 to_intel_connector(conn_state->connector);
2389 struct intel_hdcp *hdcp = &connector->hdcp;
2390 bool content_protection_type_changed, desired_and_not_enabled = false;
2392 if (!connector->hdcp.shim)
2395 content_protection_type_changed =
2396 (conn_state->hdcp_content_type != hdcp->content_type &&
2397 conn_state->content_protection !=
2398 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2401 * During the HDCP encryption session if Type change is requested,
2402 * disable the HDCP and reenable it with new TYPE value.
2404 if (conn_state->content_protection ==
2405 DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2406 content_protection_type_changed)
2407 intel_hdcp_disable(connector);
2410 * Mark the hdcp state as DESIRED after the hdcp disable of type
2413 if (content_protection_type_changed) {
2414 mutex_lock(&hdcp->mutex);
2415 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Reference dropped by prop_work once it has updated the property */
2416 drm_connector_get(&connector->base);
2417 schedule_work(&hdcp->prop_work);
2418 mutex_unlock(&hdcp->mutex);
2421 if (conn_state->content_protection ==
2422 DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2423 mutex_lock(&hdcp->mutex);
2424 /* Avoid enabling hdcp, if it already ENABLED */
2425 desired_and_not_enabled =
2426 hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2427 mutex_unlock(&hdcp->mutex);
2429 * If HDCP already ENABLED and CP property is DESIRED, schedule
2430 * prop_work to update correct CP property to user space.
2432 if (!desired_and_not_enabled && !content_protection_type_changed) {
2433 drm_connector_get(&connector->base);
2434 schedule_work(&hdcp->prop_work);
2438 if (desired_and_not_enabled || content_protection_type_changed)
2439 intel_hdcp_enable(connector,
2441 (u8)conn_state->hdcp_content_type);
/*
 * Unregister the i915 side of the MEI HDCP component; no-op when it was
 * never (successfully) added. component_del() is called outside
 * hdcp_comp_mutex.
 */
2444 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2446 mutex_lock(&dev_priv->hdcp_comp_mutex);
2447 if (!dev_priv->hdcp_comp_added) {
2448 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2452 dev_priv->hdcp_comp_added = false;
2453 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2455 component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2458 void intel_hdcp_cleanup(struct intel_connector *connector)
2460 struct intel_hdcp *hdcp = &connector->hdcp;
2466 * If the connector is registered, it's possible userspace could kick
2467 * off another HDCP enable, which would re-spawn the workers.
2469 drm_WARN_ON(connector->base.dev,
2470 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2473 * Now that the connector is not registered, check_work won't be run,
2474 * but cancel any outstanding instances of it
2476 cancel_delayed_work_sync(&hdcp->check_work);
2479 * We don't cancel prop_work in the same way as check_work since it
2480 * requires connection_mutex which could be held while calling this
2481 * function. Instead, we rely on the connector references grabbed before
2482 * scheduling prop_work to ensure the connector is alive when prop_work
2483 * is run. So if we're in the destroy path (which is where this
2484 * function should be called), we're "guaranteed" that prop_work is not
2485 * active (tl;dr This Should Never Happen).
2487 drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2489 mutex_lock(&hdcp->mutex);
2491 mutex_unlock(&hdcp->mutex);
2494 void intel_hdcp_atomic_check(struct drm_connector *connector,
2495 struct drm_connector_state *old_state,
2496 struct drm_connector_state *new_state)
2498 u64 old_cp = old_state->content_protection;
2499 u64 new_cp = new_state->content_protection;
2500 struct drm_crtc_state *crtc_state;
2502 if (!new_state->crtc) {
2504 * If the connector is being disabled with CP enabled, mark it
2505 * desired so it's re-enabled when the connector is brought back
2507 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2508 new_state->content_protection =
2509 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2513 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2516 * Fix the HDCP uapi content protection state in case of modeset.
2517 * FIXME: As per HDCP content protection property uapi doc, an uevent()
2518 * need to be sent if there is transition from ENABLED->DESIRED.
2520 if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2521 (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2522 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2523 new_state->content_protection =
2524 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2527 * Nothing to do if the state didn't change, or HDCP was activated since
2528 * the last commit. And also no change in hdcp content type.
2530 if (old_cp == new_cp ||
2531 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2532 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2533 if (old_state->hdcp_content_type ==
2534 new_state->hdcp_content_type)
2538 crtc_state->mode_changed = true;
2541 /* Handles the CP_IRQ raised from the DP HDCP sink */
2542 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2544 struct intel_hdcp *hdcp = &connector->hdcp;
2549 atomic_inc(&connector->hdcp.cp_irq_count);
2550 wake_up_all(&connector->hdcp.cp_irq_queue);
2552 schedule_delayed_work(&hdcp->check_work, 0);