drivers/gpu/drm/i915/display/intel_hdcp.c
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright © 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_regs.h"
27 #include "intel_pcode.h"
28
29 #define KEY_LOAD_TRIES  5
30 #define HDCP2_LC_RETRY_CNT                      3
31
32 static int intel_conn_to_vcpi(struct intel_connector *connector)
33 {
34         struct drm_dp_mst_topology_mgr *mgr;
35         struct drm_dp_mst_atomic_payload *payload;
36         struct drm_dp_mst_topology_state *mst_state;
37         int vcpi = 0;
38
39         /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
40         if (!connector->port)
41                 return 0;
42         mgr = connector->port->mgr;
43
44         drm_modeset_lock(&mgr->base.lock, NULL);
45         mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
46         payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
47         if (drm_WARN_ON(mgr->dev, !payload))
48                 goto out;
49
50         vcpi = payload->vcpi;
51         if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
52                 vcpi = 0;
53                 goto out;
54         }
55 out:
56         drm_modeset_unlock(&mgr->base.lock);
57         return vcpi;
58 }
59
60 /*
61  * intel_hdcp_required_content_stream selects the highest common HDCP
62  * content_type for all streams in a DP MST topology, because the security f/w
63  * has no provision to mark the content_type of each stream separately: it
64  * marks all streams with the content_type provided at the time of port
65  * authentication. This may prevent userspace from using type1 content on an
66  * HDCP 2.2 capable sink when other sinks in the DP MST topology are not
67  * HDCP 2.2 capable. Though it is not compulsory, the security fw should change
68  * its policy to mark different content_types for different streams.
69  */
70 static int
71 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
72 {
73         struct drm_connector_list_iter conn_iter;
74         struct intel_digital_port *conn_dig_port;
75         struct intel_connector *connector;
76         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
77         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
78         bool enforce_type0 = false;
79         int k;
80
81         data->k = 0;
82
83         if (dig_port->hdcp_auth_status)
84                 return 0;
85
86         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
87         for_each_intel_connector_iter(connector, &conn_iter) {
88                 if (connector->base.status == connector_status_disconnected)
89                         continue;
90
91                 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
92                         continue;
93
94                 conn_dig_port = intel_attached_dig_port(connector);
95                 if (conn_dig_port != dig_port)
96                         continue;
97
98                 if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
99                         enforce_type0 = true;
100
101                 data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
102                 data->k++;
103
104                 /* if there is only one active stream */
105                 if (dig_port->dp.active_mst_links <= 1)
106                         break;
107         }
108         drm_connector_list_iter_end(&conn_iter);
109
110         if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
111                 return -EINVAL;
112
113         /*
114          * Apply common protection level across all streams in DP MST Topology.
115          * Use highest supported content type for all streams in DP MST Topology.
116          */
117         for (k = 0; k < data->k; k++)
118                 data->streams[k].stream_type =
119                         enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
120
121         return 0;
122 }
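/*
 * For illustration (hypothetical topology, not from this driver): with two
 * active MST streams behind one dig_port, if dig_port->hdcp_mst_type1_capable
 * is false (e.g. one downstream sink is not HDCP 2.2 capable), enforce_type0
 * becomes true and the loop above leaves
 *
 *   data->streams[0].stream_type == DRM_MODE_HDCP_CONTENT_TYPE0
 *   data->streams[1].stream_type == DRM_MODE_HDCP_CONTENT_TYPE0
 *
 * i.e. the whole topology is downgraded to type0 rather than mixing types.
 */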
123
124 static int intel_hdcp_prepare_streams(struct intel_connector *connector)
125 {
126         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
127         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
128         struct intel_hdcp *hdcp = &connector->hdcp;
129         int ret;
130
131         if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
132                 data->k = 1;
133                 data->streams[0].stream_type = hdcp->content_type;
134         } else {
135                 ret = intel_hdcp_required_content_stream(dig_port);
136                 if (ret)
137                         return ret;
138         }
139
140         return 0;
141 }
142
143 static
144 bool intel_hdcp_is_ksv_valid(u8 *ksv)
145 {
146         int i, ones = 0;
147         /* KSV has 20 1's and 20 0's */
148         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
149                 ones += hweight8(ksv[i]);
150         if (ones != 20)
151                 return false;
152
153         return true;
154 }
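/*
 * Quick sanity example for the check above: a 5-byte KSV such as
 * { 0xf0, 0xf0, 0xf0, 0xf0, 0xf0 } has hweight8() == 4 per byte, i.e. 20 ones
 * and 20 zeros in total, so intel_hdcp_is_ksv_valid() returns true; an
 * all-zero or all-ones KSV fails the check.
 */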
155
156 static
157 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
158                                const struct intel_hdcp_shim *shim, u8 *bksv)
159 {
160         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
161         int ret, i, tries = 2;
162
163         /* HDCP spec states that we must retry the bksv if it is invalid */
164         for (i = 0; i < tries; i++) {
165                 ret = shim->read_bksv(dig_port, bksv);
166                 if (ret)
167                         return ret;
168                 if (intel_hdcp_is_ksv_valid(bksv))
169                         break;
170         }
171         if (i == tries) {
172                 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
173                 return -ENODEV;
174         }
175
176         return 0;
177 }
178
179 /* Is HDCP1.4 supported on the platform and by the sink? */
180 bool intel_hdcp_capable(struct intel_connector *connector)
181 {
182         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
183         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
184         bool capable = false;
185         u8 bksv[5];
186
187         if (!shim)
188                 return capable;
189
190         if (shim->hdcp_capable) {
191                 shim->hdcp_capable(dig_port, &capable);
192         } else {
193                 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
194                         capable = true;
195         }
196
197         return capable;
198 }
199
200 /* Is HDCP2.2 supported on the platform and by the sink? */
201 bool intel_hdcp2_capable(struct intel_connector *connector)
202 {
203         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
204         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
205         struct intel_hdcp *hdcp = &connector->hdcp;
206         bool capable = false;
207
208         /* I915 support for HDCP2.2 */
209         if (!hdcp->hdcp2_supported)
210                 return false;
211
212         /* The MEI component interface must be bound and available */
213         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
214         if (!dev_priv->display.hdcp.comp_added ||  !dev_priv->display.hdcp.master) {
215                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
216                 return false;
217         }
218         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
219
220         /* Sink's capability for HDCP2.2 */
221         hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
222
223         return capable;
224 }
225
226 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
227                               enum transcoder cpu_transcoder, enum port port)
228 {
229         return intel_de_read(dev_priv,
230                              HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
231                HDCP_STATUS_ENC;
232 }
233
234 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
235                                enum transcoder cpu_transcoder, enum port port)
236 {
237         return intel_de_read(dev_priv,
238                              HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
239                LINK_ENCRYPTION_STATUS;
240 }
241
242 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
243                                     const struct intel_hdcp_shim *shim)
244 {
245         int ret, read_ret;
246         bool ksv_ready;
247
248         /* Poll for ksv list ready (spec says max time allowed is 5s) */
249         ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
250                                                          &ksv_ready),
251                          read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
252                          100 * 1000);
253         if (ret)
254                 return ret;
255         if (read_ret)
256                 return read_ret;
257         if (!ksv_ready)
258                 return -ETIMEDOUT;
259
260         return 0;
261 }
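/*
 * Note on the __wait_for() arguments above (assuming the usual
 * (OP, COND, US, Wmin, Wmax) meaning in i915): the total budget is
 * 5 * 1000 * 1000 us = 5 s, matching the spec limit in the comment, with the
 * poll interval backing off between 1 ms and 100 ms.
 */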
262
263 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
264 {
265         enum i915_power_well_id id;
266         intel_wakeref_t wakeref;
267         bool enabled = false;
268
269         /*
270          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
271          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
272          */
273         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
274                 id = HSW_DISP_PW_GLOBAL;
275         else
276                 id = SKL_DISP_PW_1;
277
278         /* PG1 (power well #1) needs to be enabled */
279         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
280                 enabled = intel_display_power_well_is_enabled(dev_priv, id);
281
282         /*
283          * Another requirement for HDCP key loadability is that the cdclk PLL is
284          * enabled. We won't land here without an active crtc, so we assume that
285          * cdclk is already on.
286          */
287
288         return enabled;
289 }
290
291 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
292 {
293         intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
294         intel_de_write(dev_priv, HDCP_KEY_STATUS,
295                        HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
296 }
297
298 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
299 {
300         int ret;
301         u32 val;
302
303         val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
304         if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
305                 return 0;
306
307         /*
308          * On HSW and BDW, HW loads the HDCP1.4 key when Display comes
309          * out of reset. So if the key is not already loaded, it's an error state.
310          */
311         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
312                 if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
313                         return -ENXIO;
314
315         /*
316          * Initiate loading the HDCP key from fuses.
317          *
318          * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only display
319          * version 9 platforms (minus BXT) differ in the key load trigger
320          * process from other platforms. These platforms use the GT Driver
321          * Mailbox interface.
322          */
323         if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
324                 ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
325                 if (ret) {
326                         drm_err(&dev_priv->drm,
327                                 "Failed to initiate HDCP key load (%d)\n",
328                                 ret);
329                         return ret;
330                 }
331         } else {
332                 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
333         }
334
335         /* Wait for the keys to load (500us) */
336         ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
337                                         HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
338                                         10, 1, &val);
339         if (ret)
340                 return ret;
341         else if (!(val & HDCP_KEY_LOAD_STATUS))
342                 return -ENXIO;
343
344         /* Send Aksv over to PCH display for use in authentication */
345         intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
346
347         return 0;
348 }
349
350 /* Writes 32 bits of SHA-1 text and waits for the HW to be ready for the next write */
351 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
352 {
353         intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
354         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
355                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
356                 return -ETIMEDOUT;
357         }
358         return 0;
359 }
360
361 static
362 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
363                                 enum transcoder cpu_transcoder, enum port port)
364 {
365         if (DISPLAY_VER(dev_priv) >= 12) {
366                 switch (cpu_transcoder) {
367                 case TRANSCODER_A:
368                         return HDCP_TRANSA_REP_PRESENT |
369                                HDCP_TRANSA_SHA1_M0;
370                 case TRANSCODER_B:
371                         return HDCP_TRANSB_REP_PRESENT |
372                                HDCP_TRANSB_SHA1_M0;
373                 case TRANSCODER_C:
374                         return HDCP_TRANSC_REP_PRESENT |
375                                HDCP_TRANSC_SHA1_M0;
376                 case TRANSCODER_D:
377                         return HDCP_TRANSD_REP_PRESENT |
378                                HDCP_TRANSD_SHA1_M0;
379                 default:
380                         drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
381                                 cpu_transcoder);
382                         return -EINVAL;
383                 }
384         }
385
386         switch (port) {
387         case PORT_A:
388                 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
389         case PORT_B:
390                 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
391         case PORT_C:
392                 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
393         case PORT_D:
394                 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
395         case PORT_E:
396                 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
397         default:
398                 drm_err(&dev_priv->drm, "Unknown port %d\n", port);
399                 return -EINVAL;
400         }
401 }
402
403 static
404 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
405                                 const struct intel_hdcp_shim *shim,
406                                 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
407 {
408         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
409         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
410         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
411         enum port port = dig_port->base.port;
412         u32 vprime, sha_text, sha_leftovers, rep_ctl;
413         int ret, i, j, sha_idx;
414
415         /* Process V' values from the receiver */
416         for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
417                 ret = shim->read_v_prime_part(dig_port, i, &vprime);
418                 if (ret)
419                         return ret;
420                 intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
421         }
422
423         /*
424          * We need to write the concatenation of all device KSVs, BINFO (DP) ||
425          * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
426          * stream is written via the HDCP_SHA_TEXT register in 32-bit
427          * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
428          * index will keep track of our progress through the 64 bytes as well as
429          * helping us work the 40-bit KSVs through our 32-bit register.
430          *
431          * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
432          */
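        /*
         * Worked example of the first loop iteration below: with
         * sha_leftovers == 0 and sizeof(sha_text) == 4, sha_empty == 4, so
         * ksv[0..3] are packed big-endian into sha_text and written out, and
         * the remaining DRM_HDCP_KSV_LEN - 4 == 1 byte (ksv[4]) is carried
         * over in sha_leftovers for the next iteration.
         */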
433         sha_idx = 0;
434         sha_text = 0;
435         sha_leftovers = 0;
436         rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
437         intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
438         for (i = 0; i < num_downstream; i++) {
439                 unsigned int sha_empty;
440                 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
441
442                 /* Fill up the empty slots in sha_text and write it out */
443                 sha_empty = sizeof(sha_text) - sha_leftovers;
444                 for (j = 0; j < sha_empty; j++) {
445                         u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
446                         sha_text |= ksv[j] << off;
447                 }
448
449                 ret = intel_write_sha_text(dev_priv, sha_text);
450                 if (ret < 0)
451                         return ret;
452
453                 /* The programming guide says to re-write HDCP_REP_CTL every 64 bytes */
454                 sha_idx += sizeof(sha_text);
455                 if (!(sha_idx % 64))
456                         intel_de_write(dev_priv, HDCP_REP_CTL,
457                                        rep_ctl | HDCP_SHA1_TEXT_32);
458
459                 /* Store the leftover bytes from the ksv in sha_text */
460                 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
461                 sha_text = 0;
462                 for (j = 0; j < sha_leftovers; j++)
463                         sha_text |= ksv[sha_empty + j] <<
464                                         ((sizeof(sha_text) - j - 1) * 8);
465
466                 /*
467                  * If we still have room in sha_text for more data, continue.
468                  * Otherwise, write it out immediately.
469                  */
470                 if (sizeof(sha_text) > sha_leftovers)
471                         continue;
472
473                 ret = intel_write_sha_text(dev_priv, sha_text);
474                 if (ret < 0)
475                         return ret;
476                 sha_leftovers = 0;
477                 sha_text = 0;
478                 sha_idx += sizeof(sha_text);
479         }
480
481         /*
482          * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
483          * bytes are leftover from the last ksv, we might be able to fit them
484          * all in sha_text (first 2 cases), or we might need to split them up
485          * into 2 writes (last 2 cases).
486          */
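        /*
         * Since each KSV is 5 bytes and each write drains 4, sha_leftovers
         * cycles 1, 2, 3, 0 across devices, i.e. it ends up as
         * (num_downstream * DRM_HDCP_KSV_LEN) % sizeof(sha_text), which is why
         * exactly these four cases are possible.
         */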
487         if (sha_leftovers == 0) {
488                 /* Write 16 bits of text, 16 bits of M0 */
489                 intel_de_write(dev_priv, HDCP_REP_CTL,
490                                rep_ctl | HDCP_SHA1_TEXT_16);
491                 ret = intel_write_sha_text(dev_priv,
492                                            bstatus[0] << 8 | bstatus[1]);
493                 if (ret < 0)
494                         return ret;
495                 sha_idx += sizeof(sha_text);
496
497                 /* Write 32 bits of M0 */
498                 intel_de_write(dev_priv, HDCP_REP_CTL,
499                                rep_ctl | HDCP_SHA1_TEXT_0);
500                 ret = intel_write_sha_text(dev_priv, 0);
501                 if (ret < 0)
502                         return ret;
503                 sha_idx += sizeof(sha_text);
504
505                 /* Write 16 bits of M0 */
506                 intel_de_write(dev_priv, HDCP_REP_CTL,
507                                rep_ctl | HDCP_SHA1_TEXT_16);
508                 ret = intel_write_sha_text(dev_priv, 0);
509                 if (ret < 0)
510                         return ret;
511                 sha_idx += sizeof(sha_text);
512
513         } else if (sha_leftovers == 1) {
514                 /* Write 24 bits of text, 8 bits of M0 */
515                 intel_de_write(dev_priv, HDCP_REP_CTL,
516                                rep_ctl | HDCP_SHA1_TEXT_24);
517                 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
518                 /* Only 24-bits of data, must be in the LSB */
519                 sha_text = (sha_text & 0xffffff00) >> 8;
520                 ret = intel_write_sha_text(dev_priv, sha_text);
521                 if (ret < 0)
522                         return ret;
523                 sha_idx += sizeof(sha_text);
524
525                 /* Write 32 bits of M0 */
526                 intel_de_write(dev_priv, HDCP_REP_CTL,
527                                rep_ctl | HDCP_SHA1_TEXT_0);
528                 ret = intel_write_sha_text(dev_priv, 0);
529                 if (ret < 0)
530                         return ret;
531                 sha_idx += sizeof(sha_text);
532
533                 /* Write 24 bits of M0 */
534                 intel_de_write(dev_priv, HDCP_REP_CTL,
535                                rep_ctl | HDCP_SHA1_TEXT_8);
536                 ret = intel_write_sha_text(dev_priv, 0);
537                 if (ret < 0)
538                         return ret;
539                 sha_idx += sizeof(sha_text);
540
541         } else if (sha_leftovers == 2) {
542                 /* Write 32 bits of text */
543                 intel_de_write(dev_priv, HDCP_REP_CTL,
544                                rep_ctl | HDCP_SHA1_TEXT_32);
545                 sha_text |= bstatus[0] << 8 | bstatus[1];
546                 ret = intel_write_sha_text(dev_priv, sha_text);
547                 if (ret < 0)
548                         return ret;
549                 sha_idx += sizeof(sha_text);
550
551                 /* Write 64 bits of M0 */
552                 intel_de_write(dev_priv, HDCP_REP_CTL,
553                                rep_ctl | HDCP_SHA1_TEXT_0);
554                 for (i = 0; i < 2; i++) {
555                         ret = intel_write_sha_text(dev_priv, 0);
556                         if (ret < 0)
557                                 return ret;
558                         sha_idx += sizeof(sha_text);
559                 }
560
561                 /*
562                  * Terminate the SHA-1 stream by hand. For the other leftover
563                  * cases this is appended by the hardware.
564                  */
565                 intel_de_write(dev_priv, HDCP_REP_CTL,
566                                rep_ctl | HDCP_SHA1_TEXT_32);
567                 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
568                 ret = intel_write_sha_text(dev_priv, sha_text);
569                 if (ret < 0)
570                         return ret;
571                 sha_idx += sizeof(sha_text);
572         } else if (sha_leftovers == 3) {
573                 /* Write 32 bits of text (filled from LSB) */
574                 intel_de_write(dev_priv, HDCP_REP_CTL,
575                                rep_ctl | HDCP_SHA1_TEXT_32);
576                 sha_text |= bstatus[0];
577                 ret = intel_write_sha_text(dev_priv, sha_text);
578                 if (ret < 0)
579                         return ret;
580                 sha_idx += sizeof(sha_text);
581
582                 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
583                 intel_de_write(dev_priv, HDCP_REP_CTL,
584                                rep_ctl | HDCP_SHA1_TEXT_8);
585                 ret = intel_write_sha_text(dev_priv, bstatus[1]);
586                 if (ret < 0)
587                         return ret;
588                 sha_idx += sizeof(sha_text);
589
590                 /* Write 32 bits of M0 */
591                 intel_de_write(dev_priv, HDCP_REP_CTL,
592                                rep_ctl | HDCP_SHA1_TEXT_0);
593                 ret = intel_write_sha_text(dev_priv, 0);
594                 if (ret < 0)
595                         return ret;
596                 sha_idx += sizeof(sha_text);
597
598                 /* Write 8 bits of M0 */
599                 intel_de_write(dev_priv, HDCP_REP_CTL,
600                                rep_ctl | HDCP_SHA1_TEXT_24);
601                 ret = intel_write_sha_text(dev_priv, 0);
602                 if (ret < 0)
603                         return ret;
604                 sha_idx += sizeof(sha_text);
605         } else {
606                 drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
607                             sha_leftovers);
608                 return -EINVAL;
609         }
610
611         intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
612         /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
613         while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
614                 ret = intel_write_sha_text(dev_priv, 0);
615                 if (ret < 0)
616                         return ret;
617                 sha_idx += sizeof(sha_text);
618         }
619
620         /*
621          * Last write gets the length of the concatenation in bits. That is:
622          *  - 5 bytes per device
623          *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
624          */
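        /*
         * E.g. for 3 downstream devices the concatenation is
         * 3 * 5 + 10 = 25 bytes, so the value written below is 25 * 8 = 200.
         */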
625         sha_text = (num_downstream * 5 + 10) * 8;
626         ret = intel_write_sha_text(dev_priv, sha_text);
627         if (ret < 0)
628                 return ret;
629
630         /* Tell the HW we're done with the hash and wait for it to ACK */
631         intel_de_write(dev_priv, HDCP_REP_CTL,
632                        rep_ctl | HDCP_SHA1_COMPLETE_HASH);
633         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
634                                   HDCP_SHA1_COMPLETE, 1)) {
635                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
636                 return -ETIMEDOUT;
637         }
638         if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
639                 drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
640                 return -ENXIO;
641         }
642
643         return 0;
644 }
645
646 /* Implements Part 2 of the HDCP authorization procedure */
647 static
648 int intel_hdcp_auth_downstream(struct intel_connector *connector)
649 {
650         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
651         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
652         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
653         u8 bstatus[2], num_downstream, *ksv_fifo;
654         int ret, i, tries = 3;
655
656         ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
657         if (ret) {
658                 drm_dbg_kms(&dev_priv->drm,
659                             "KSV list failed to become ready (%d)\n", ret);
660                 return ret;
661         }
662
663         ret = shim->read_bstatus(dig_port, bstatus);
664         if (ret)
665                 return ret;
666
667         if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
668             DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
669                 drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
670                 return -EPERM;
671         }
672
673         /*
674          * When a repeater reports a device count of 0, the HDCP1.4 spec allows
675          * disabling HDCP encryption, which implies that the repeater can't have
676          * its own display. As no encrypted content is consumed by a repeater
677          * with 0 downstream devices, we fail the authentication.
679          */
680         num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
681         if (num_downstream == 0) {
682                 drm_dbg_kms(&dev_priv->drm,
683                             "Repeater with zero downstream devices\n");
684                 return -EINVAL;
685         }
686
687         ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
688         if (!ksv_fifo) {
689                 drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
690                 return -ENOMEM;
691         }
692
693         ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
694         if (ret)
695                 goto err;
696
697         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
698                                         num_downstream) > 0) {
699                 drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
700                 ret = -EPERM;
701                 goto err;
702         }
703
704         /*
705          * When V prime mismatches, the DP spec mandates re-reading
706          * V prime at least twice.
707          */
708         for (i = 0; i < tries; i++) {
709                 ret = intel_hdcp_validate_v_prime(connector, shim,
710                                                   ksv_fifo, num_downstream,
711                                                   bstatus);
712                 if (!ret)
713                         break;
714         }
715
716         if (i == tries) {
717                 drm_dbg_kms(&dev_priv->drm,
718                             "V Prime validation failed.(%d)\n", ret);
719                 goto err;
720         }
721
722         drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
723                     num_downstream);
724         ret = 0;
725 err:
726         kfree(ksv_fifo);
727         return ret;
728 }
729
730 /* Implements Part 1 of the HDCP authorization procedure */
731 static int intel_hdcp_auth(struct intel_connector *connector)
732 {
733         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
734         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
735         struct intel_hdcp *hdcp = &connector->hdcp;
736         const struct intel_hdcp_shim *shim = hdcp->shim;
737         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
738         enum port port = dig_port->base.port;
739         unsigned long r0_prime_gen_start;
740         int ret, i, tries = 2;
741         union {
742                 u32 reg[2];
743                 u8 shim[DRM_HDCP_AN_LEN];
744         } an;
745         union {
746                 u32 reg[2];
747                 u8 shim[DRM_HDCP_KSV_LEN];
748         } bksv;
749         union {
750                 u32 reg;
751                 u8 shim[DRM_HDCP_RI_LEN];
752         } ri;
753         bool repeater_present, hdcp_capable;
754
755         /*
756          * Detects whether the display is HDCP capable. Although we check for
757          * valid Bksv below, the HDCP over DP spec requires that we check
758          * whether the display supports HDCP before we write An. For HDMI
759          * displays, this is not necessary.
760          */
761         if (shim->hdcp_capable) {
762                 ret = shim->hdcp_capable(dig_port, &hdcp_capable);
763                 if (ret)
764                         return ret;
765                 if (!hdcp_capable) {
766                         drm_dbg_kms(&dev_priv->drm,
767                                     "Panel is not HDCP capable\n");
768                         return -EINVAL;
769                 }
770         }
771
772         /* Initialize An with 2 random values and acquire it */
773         for (i = 0; i < 2; i++)
774                 intel_de_write(dev_priv,
775                                HDCP_ANINIT(dev_priv, cpu_transcoder, port),
776                                get_random_u32());
777         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
778                        HDCP_CONF_CAPTURE_AN);
779
780         /* Wait for An to be acquired */
781         if (intel_de_wait_for_set(dev_priv,
782                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
783                                   HDCP_STATUS_AN_READY, 1)) {
784                 drm_err(&dev_priv->drm, "Timed out waiting for An\n");
785                 return -ETIMEDOUT;
786         }
787
788         an.reg[0] = intel_de_read(dev_priv,
789                                   HDCP_ANLO(dev_priv, cpu_transcoder, port));
790         an.reg[1] = intel_de_read(dev_priv,
791                                   HDCP_ANHI(dev_priv, cpu_transcoder, port));
792         ret = shim->write_an_aksv(dig_port, an.shim);
793         if (ret)
794                 return ret;
795
796         r0_prime_gen_start = jiffies;
797
798         memset(&bksv, 0, sizeof(bksv));
799
800         ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
801         if (ret < 0)
802                 return ret;
803
804         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
805                 drm_err(&dev_priv->drm, "BKSV is revoked\n");
806                 return -EPERM;
807         }
808
809         intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
810                        bksv.reg[0]);
811         intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
812                        bksv.reg[1]);
813
814         ret = shim->repeater_present(dig_port, &repeater_present);
815         if (ret)
816                 return ret;
817         if (repeater_present)
818                 intel_de_write(dev_priv, HDCP_REP_CTL,
819                                intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
820
821         ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
822         if (ret)
823                 return ret;
824
825         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
826                        HDCP_CONF_AUTH_AND_ENC);
827
828         /* Wait for R0 ready */
829         if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
830                      (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
831                 drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
832                 return -ETIMEDOUT;
833         }
834
835         /*
836          * Wait for R0' to become available. The spec says 100ms from Aksv, but
837          * some monitors can take longer than this. We'll set the timeout at
838          * 300ms just to be sure.
839          *
840          * On DP, there's an R0_READY bit available but no such bit
841          * exists on HDMI. Since the upper-bound is the same, we'll just do
842          * the stupid thing instead of polling on one and not the other.
843          */
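        /*
         * wait_remaining_ms_from_jiffies() only sleeps for whatever part of
         * the 300 ms has not already elapsed since r0_prime_gen_start (taken
         * right after the An/Aksv write), so e.g. if the Bksv and repeater
         * setup above took 120 ms, this waits roughly 180 ms more.
         */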
844         wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
845
846         tries = 3;
847
848         /*
849          * The DP HDCP spec mandates two more attempts to read R0' in case
850          * of an R0 mismatch.
851          */
852         for (i = 0; i < tries; i++) {
853                 ri.reg = 0;
854                 ret = shim->read_ri_prime(dig_port, ri.shim);
855                 if (ret)
856                         return ret;
857                 intel_de_write(dev_priv,
858                                HDCP_RPRIME(dev_priv, cpu_transcoder, port),
859                                ri.reg);
860
861                 /* Wait for Ri prime match */
862                 if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
863                               (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
864                         break;
865         }
866
867         if (i == tries) {
868                 drm_dbg_kms(&dev_priv->drm,
869                             "Timed out waiting for Ri prime match (%x)\n",
870                             intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
871                                           cpu_transcoder, port)));
872                 return -ETIMEDOUT;
873         }
874
875         /* Wait for encryption confirmation */
876         if (intel_de_wait_for_set(dev_priv,
877                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
878                                   HDCP_STATUS_ENC,
879                                   HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
880                 drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
881                 return -ETIMEDOUT;
882         }
883
884         /* DP MST Auth Part 1 Step 2.a and Step 2.b */
885         if (shim->stream_encryption) {
886                 ret = shim->stream_encryption(connector, true);
887                 if (ret) {
888                         drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
889                                 connector->base.name, connector->base.base.id);
890                         return ret;
891                 }
892                 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
893                             transcoder_name(hdcp->stream_transcoder));
894         }
895
896         if (repeater_present)
897                 return intel_hdcp_auth_downstream(connector);
898
899         drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
900         return 0;
901 }
902
903 static int _intel_hdcp_disable(struct intel_connector *connector)
904 {
905         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
906         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
907         struct intel_hdcp *hdcp = &connector->hdcp;
908         enum port port = dig_port->base.port;
909         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
910         u32 repeater_ctl;
911         int ret;
912
913         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
914                     connector->base.name, connector->base.base.id);
915
916         if (hdcp->shim->stream_encryption) {
917                 ret = hdcp->shim->stream_encryption(connector, false);
918                 if (ret) {
919                         drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
920                                 connector->base.name, connector->base.base.id);
921                         return ret;
922                 }
923                 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
924                             transcoder_name(hdcp->stream_transcoder));
925                 /*
926                  * If there are other connectors on this port using HDCP,
927                  * don't disable it until HDCP encryption has been disabled for
928                  * all connectors in the MST topology.
929                  */
930                 if (dig_port->num_hdcp_streams > 0)
931                         return 0;
932         }
933
934         hdcp->hdcp_encrypted = false;
935         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
936         if (intel_de_wait_for_clear(dev_priv,
937                                     HDCP_STATUS(dev_priv, cpu_transcoder, port),
938                                     ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
939                 drm_err(&dev_priv->drm,
940                         "Failed to disable HDCP, timeout clearing status\n");
941                 return -ETIMEDOUT;
942         }
943
944         repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
945                                                    port);
946         intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0);
947
948         ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
949         if (ret) {
950                 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
951                 return ret;
952         }
953
954         drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
955         return 0;
956 }
957
958 static int _intel_hdcp_enable(struct intel_connector *connector)
959 {
960         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
961         struct intel_hdcp *hdcp = &connector->hdcp;
962         int i, ret, tries = 3;
963
964         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
965                     connector->base.name, connector->base.base.id);
966
967         if (!hdcp_key_loadable(dev_priv)) {
968                 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
969                 return -ENXIO;
970         }
971
972         for (i = 0; i < KEY_LOAD_TRIES; i++) {
973                 ret = intel_hdcp_load_keys(dev_priv);
974                 if (!ret)
975                         break;
976                 intel_hdcp_clear_keys(dev_priv);
977         }
978         if (ret) {
979                 drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
980                         ret);
981                 return ret;
982         }
983
984         /* In case of authentication failures, the HDCP spec expects reauth. */
985         for (i = 0; i < tries; i++) {
986                 ret = intel_hdcp_auth(connector);
987                 if (!ret) {
988                         hdcp->hdcp_encrypted = true;
989                         return 0;
990                 }
991
992                 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
993
994                 /* Ensuring HDCP encryption and signalling are stopped. */
995                 _intel_hdcp_disable(connector);
996         }
997
998         drm_dbg_kms(&dev_priv->drm,
999                     "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1000         return ret;
1001 }
1002
1003 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1004 {
1005         return container_of(hdcp, struct intel_connector, hdcp);
1006 }
1007
1008 static void intel_hdcp_update_value(struct intel_connector *connector,
1009                                     u64 value, bool update_property)
1010 {
1011         struct drm_device *dev = connector->base.dev;
1012         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1013         struct intel_hdcp *hdcp = &connector->hdcp;
1014
1015         drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
1016
1017         if (hdcp->value == value)
1018                 return;
1019
1020         drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
1021
1022         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1023                 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
1024                         dig_port->num_hdcp_streams--;
1025         } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1026                 dig_port->num_hdcp_streams++;
1027         }
1028
1029         hdcp->value = value;
1030         if (update_property) {
1031                 drm_connector_get(&connector->base);
1032                 schedule_work(&hdcp->prop_work);
1033         }
1034 }
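/*
 * Note: the transitions above make dig_port->num_hdcp_streams a count of
 * connectors on this port whose value is CONTENT_PROTECTION_ENABLED; e.g.
 * _intel_hdcp_disable() relies on it staying non-zero to keep port-level
 * encryption on while other MST streams are still protected.
 */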
1035
1036 /* Implements Part 3 of the HDCP authorization procedure */
1037 static int intel_hdcp_check_link(struct intel_connector *connector)
1038 {
1039         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1040         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1041         struct intel_hdcp *hdcp = &connector->hdcp;
1042         enum port port = dig_port->base.port;
1043         enum transcoder cpu_transcoder;
1044         int ret = 0;
1045
1046         mutex_lock(&hdcp->mutex);
1047         mutex_lock(&dig_port->hdcp_mutex);
1048
1049         cpu_transcoder = hdcp->cpu_transcoder;
1050
1051         /* check_link is valid only when HDCP1.4 is enabled */
1052         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1053             !hdcp->hdcp_encrypted) {
1054                 ret = -EINVAL;
1055                 goto out;
1056         }
1057
1058         if (drm_WARN_ON(&dev_priv->drm,
1059                         !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
1060                 drm_err(&dev_priv->drm,
1061                         "%s:%d HDCP link stopped encryption,%x\n",
1062                         connector->base.name, connector->base.base.id,
1063                         intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
1064                 ret = -ENXIO;
1065                 intel_hdcp_update_value(connector,
1066                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1067                                         true);
1068                 goto out;
1069         }
1070
1071         if (hdcp->shim->check_link(dig_port, connector)) {
1072                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1073                         intel_hdcp_update_value(connector,
1074                                 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1075                 }
1076                 goto out;
1077         }
1078
1079         drm_dbg_kms(&dev_priv->drm,
1080                     "[%s:%d] HDCP link failed, retrying authentication\n",
1081                     connector->base.name, connector->base.base.id);
1082
1083         ret = _intel_hdcp_disable(connector);
1084         if (ret) {
1085                 drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
1086                 intel_hdcp_update_value(connector,
1087                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1088                                         true);
1089                 goto out;
1090         }
1091
1092         ret = _intel_hdcp_enable(connector);
1093         if (ret) {
1094                 drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
1095                 intel_hdcp_update_value(connector,
1096                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1097                                         true);
1098                 goto out;
1099         }
1100
1101 out:
1102         mutex_unlock(&dig_port->hdcp_mutex);
1103         mutex_unlock(&hdcp->mutex);
1104         return ret;
1105 }
1106
1107 static void intel_hdcp_prop_work(struct work_struct *work)
1108 {
1109         struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1110                                                prop_work);
1111         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1112         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1113
1114         drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
1115         mutex_lock(&hdcp->mutex);
1116
1117         /*
1118          * This worker is only used to flip between ENABLED/DESIRED. Either of
1119          * those to UNDESIRED is handled by core. If value == UNDESIRED,
1120          * we're running just after hdcp has been disabled, so just exit
1121          */
1122         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1123                 drm_hdcp_update_content_protection(&connector->base,
1124                                                    hdcp->value);
1125
1126         mutex_unlock(&hdcp->mutex);
1127         drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
1128
1129         drm_connector_put(&connector->base);
1130 }
1131
1132 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1133 {
1134         return RUNTIME_INFO(dev_priv)->has_hdcp &&
1135                 (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1136 }
1137
1138 static int
1139 hdcp2_prepare_ake_init(struct intel_connector *connector,
1140                        struct hdcp2_ake_init *ake_data)
1141 {
1142         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1143         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1144         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1145         struct i915_hdcp_comp_master *comp;
1146         int ret;
1147
1148         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1149         comp = dev_priv->display.hdcp.master;
1150
1151         if (!comp || !comp->ops) {
1152                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1153                 return -EINVAL;
1154         }
1155
1156         ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1157         if (ret)
1158                 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1159                             ret);
1160         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1161
1162         return ret;
1163 }
1164
1165 static int
1166 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1167                                 struct hdcp2_ake_send_cert *rx_cert,
1168                                 bool *paired,
1169                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
1170                                 size_t *msg_sz)
1171 {
1172         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1173         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1174         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1175         struct i915_hdcp_comp_master *comp;
1176         int ret;
1177
1178         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1179         comp = dev_priv->display.hdcp.master;
1180
1181         if (!comp || !comp->ops) {
1182                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1183                 return -EINVAL;
1184         }
1185
1186         ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1187                                                          rx_cert, paired,
1188                                                          ek_pub_km, msg_sz);
1189         if (ret < 0)
1190                 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1191                             ret);
1192         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1193
1194         return ret;
1195 }
1196
1197 static int hdcp2_verify_hprime(struct intel_connector *connector,
1198                                struct hdcp2_ake_send_hprime *rx_hprime)
1199 {
1200         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1201         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1202         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1203         struct i915_hdcp_comp_master *comp;
1204         int ret;
1205
1206         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1207         comp = dev_priv->display.hdcp.master;
1208
1209         if (!comp || !comp->ops) {
1210                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1211                 return -EINVAL;
1212         }
1213
1214         ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1215         if (ret < 0)
1216                 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1217         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1218
1219         return ret;
1220 }
1221
1222 static int
1223 hdcp2_store_pairing_info(struct intel_connector *connector,
1224                          struct hdcp2_ake_send_pairing_info *pairing_info)
1225 {
1226         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1227         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1228         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1229         struct i915_hdcp_comp_master *comp;
1230         int ret;
1231
1232         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1233         comp = dev_priv->display.hdcp.master;
1234
1235         if (!comp || !comp->ops) {
1236                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1237                 return -EINVAL;
1238         }
1239
1240         ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1241         if (ret < 0)
1242                 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1243                             ret);
1244         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1245
1246         return ret;
1247 }
1248
1249 static int
1250 hdcp2_prepare_lc_init(struct intel_connector *connector,
1251                       struct hdcp2_lc_init *lc_init)
1252 {
1253         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1254         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1255         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1256         struct i915_hdcp_comp_master *comp;
1257         int ret;
1258
1259         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1260         comp = dev_priv->display.hdcp.master;
1261
1262         if (!comp || !comp->ops) {
1263                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1264                 return -EINVAL;
1265         }
1266
1267         ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1268         if (ret < 0)
1269                 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1270                             ret);
1271         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1272
1273         return ret;
1274 }
1275
1276 static int
1277 hdcp2_verify_lprime(struct intel_connector *connector,
1278                     struct hdcp2_lc_send_lprime *rx_lprime)
1279 {
1280         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1281         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1282         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1283         struct i915_hdcp_comp_master *comp;
1284         int ret;
1285
1286         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1287         comp = dev_priv->display.hdcp.master;
1288
1289         if (!comp || !comp->ops) {
1290                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1291                 return -EINVAL;
1292         }
1293
1294         ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1295         if (ret < 0)
1296                 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1297                             ret);
1298         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1299
1300         return ret;
1301 }
1302
1303 static int hdcp2_prepare_skey(struct intel_connector *connector,
1304                               struct hdcp2_ske_send_eks *ske_data)
1305 {
1306         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1307         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1308         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1309         struct i915_hdcp_comp_master *comp;
1310         int ret;
1311
1312         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1313         comp = dev_priv->display.hdcp.master;
1314
1315         if (!comp || !comp->ops) {
1316                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1317                 return -EINVAL;
1318         }
1319
1320         ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1321         if (ret < 0)
1322                 drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1323                             ret);
1324         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1325
1326         return ret;
1327 }
1328
1329 static int
1330 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1331                                       struct hdcp2_rep_send_receiverid_list
1332                                                                 *rep_topology,
1333                                       struct hdcp2_rep_send_ack *rep_send_ack)
1334 {
1335         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1336         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1337         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1338         struct i915_hdcp_comp_master *comp;
1339         int ret;
1340
1341         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1342         comp = dev_priv->display.hdcp.master;
1343
1344         if (!comp || !comp->ops) {
1345                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1346                 return -EINVAL;
1347         }
1348
1349         ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1350                                                          rep_topology,
1351                                                          rep_send_ack);
1352         if (ret < 0)
1353                 drm_dbg_kms(&dev_priv->drm,
1354                             "Verify rep topology failed. %d\n", ret);
1355         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1356
1357         return ret;
1358 }
1359
1360 static int
1361 hdcp2_verify_mprime(struct intel_connector *connector,
1362                     struct hdcp2_rep_stream_ready *stream_ready)
1363 {
1364         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1365         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1366         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1367         struct i915_hdcp_comp_master *comp;
1368         int ret;
1369
1370         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1371         comp = dev_priv->display.hdcp.master;
1372
1373         if (!comp || !comp->ops) {
1374                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1375                 return -EINVAL;
1376         }
1377
1378         ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1379         if (ret < 0)
1380                 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1381         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1382
1383         return ret;
1384 }
1385
1386 static int hdcp2_authenticate_port(struct intel_connector *connector)
1387 {
1388         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1389         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1390         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1391         struct i915_hdcp_comp_master *comp;
1392         int ret;
1393
1394         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1395         comp = dev_priv->display.hdcp.master;
1396
1397         if (!comp || !comp->ops) {
1398                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1399                 return -EINVAL;
1400         }
1401
1402         ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1403         if (ret < 0)
1404                 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1405                             ret);
1406         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1407
1408         return ret;
1409 }
1410
1411 static int hdcp2_close_mei_session(struct intel_connector *connector)
1412 {
1413         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1414         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1415         struct i915_hdcp_comp_master *comp;
1416         int ret;
1417
1418         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1419         comp = dev_priv->display.hdcp.master;
1420
1421         if (!comp || !comp->ops) {
1422                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1423                 return -EINVAL;
1424         }
1425
1426         ret = comp->ops->close_hdcp_session(comp->mei_dev,
1427                                              &dig_port->hdcp_port_data);
1428         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1429
1430         return ret;
1431 }
1432
1433 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1434 {
1435         return hdcp2_close_mei_session(connector);
1436 }
1437
1438 /* Authentication flow starts from here */
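     /*
      * A short overview of the HDCP 2.2 sequence implemented below: AKE
      * (certificate exchange and km derivation), Locality Check, Session Key
      * Exchange, then, for repeaters, topology authentication and stream
      * management, and finally link and stream encryption enablement.
      */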
1439 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1440 {
1441         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1442         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1443         struct intel_hdcp *hdcp = &connector->hdcp;
1444         union {
1445                 struct hdcp2_ake_init ake_init;
1446                 struct hdcp2_ake_send_cert send_cert;
1447                 struct hdcp2_ake_no_stored_km no_stored_km;
1448                 struct hdcp2_ake_send_hprime send_hprime;
1449                 struct hdcp2_ake_send_pairing_info pairing_info;
1450         } msgs;
1451         const struct intel_hdcp_shim *shim = hdcp->shim;
1452         size_t size;
1453         int ret;
1454
1455         /* Init for seq_num */
1456         hdcp->seq_num_v = 0;
1457         hdcp->seq_num_m = 0;
1458
1459         ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1460         if (ret < 0)
1461                 return ret;
1462
1463         ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
1464                                   sizeof(msgs.ake_init));
1465         if (ret < 0)
1466                 return ret;
1467
1468         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
1469                                  &msgs.send_cert, sizeof(msgs.send_cert));
1470         if (ret < 0)
1471                 return ret;
1472
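             /* The receiver's rx_caps must advertise the HDCP 2.2 version */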
1473         if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1474                 drm_dbg_kms(&dev_priv->drm, "cert.rx_caps don't claim HDCP2.2\n");
1475                 return -EINVAL;
1476         }
1477
1478         hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1479
1480         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1481                                         msgs.send_cert.cert_rx.receiver_id,
1482                                         1) > 0) {
1483                 drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
1484                 return -EPERM;
1485         }
1486
1487         /*
1488          * msgs.no_stored_km is reused for the stored-km case as well;
1489          * 'size' reflects which AKE km message was actually prepared.
1490          */
1491         ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1492                                               &hdcp->is_paired,
1493                                               &msgs.no_stored_km, &size);
1494         if (ret < 0)
1495                 return ret;
1496
1497         ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
1498         if (ret < 0)
1499                 return ret;
1500
1501         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1502                                  &msgs.send_hprime, sizeof(msgs.send_hprime));
1503         if (ret < 0)
1504                 return ret;
1505
1506         ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1507         if (ret < 0)
1508                 return ret;
1509
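             /*
              * Without a stored km the receiver must be paired: read the
              * pairing info and hand it to the firmware for storage.
              */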
1510         if (!hdcp->is_paired) {
1511                 /* Pairing is required */
1512                 ret = shim->read_2_2_msg(dig_port,
1513                                          HDCP_2_2_AKE_SEND_PAIRING_INFO,
1514                                          &msgs.pairing_info,
1515                                          sizeof(msgs.pairing_info));
1516                 if (ret < 0)
1517                         return ret;
1518
1519                 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1520                 if (ret < 0)
1521                         return ret;
1522                 hdcp->is_paired = true;
1523         }
1524
1525         return 0;
1526 }
1527
1528 static int hdcp2_locality_check(struct intel_connector *connector)
1529 {
1530         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1531         struct intel_hdcp *hdcp = &connector->hdcp;
1532         union {
1533                 struct hdcp2_lc_init lc_init;
1534                 struct hdcp2_lc_send_lprime send_lprime;
1535         } msgs;
1536         const struct intel_hdcp_shim *shim = hdcp->shim;
1537         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1538
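             /*
              * Run the locality check, retrying up to HDCP2_LC_RETRY_CNT
              * times; each attempt starts over with a fresh LC_Init message.
              */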
1539         for (i = 0; i < tries; i++) {
1540                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1541                 if (ret < 0)
1542                         continue;
1543
1544                 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1545                                       sizeof(msgs.lc_init));
1546                 if (ret < 0)
1547                         continue;
1548
1549                 ret = shim->read_2_2_msg(dig_port,
1550                                          HDCP_2_2_LC_SEND_LPRIME,
1551                                          &msgs.send_lprime,
1552                                          sizeof(msgs.send_lprime));
1553                 if (ret < 0)
1554                         continue;
1555
1556                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1557                 if (!ret)
1558                         break;
1559         }
1560
1561         return ret;
1562 }
1563
1564 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1565 {
1566         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1567         struct intel_hdcp *hdcp = &connector->hdcp;
1568         struct hdcp2_ske_send_eks send_eks;
1569         int ret;
1570
1571         ret = hdcp2_prepare_skey(connector, &send_eks);
1572         if (ret < 0)
1573                 return ret;
1574
1575         ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1576                                         sizeof(send_eks));
1577         if (ret < 0)
1578                 return ret;
1579
1580         return 0;
1581 }
1582
1583 static
1584 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1585 {
1586         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1587         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1588         struct intel_hdcp *hdcp = &connector->hdcp;
1589         union {
1590                 struct hdcp2_rep_stream_manage stream_manage;
1591                 struct hdcp2_rep_stream_ready stream_ready;
1592         } msgs;
1593         const struct intel_hdcp_shim *shim = hdcp->shim;
1594         int ret, streams_size_delta, i;
1595
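             /*
              * seq_num_m is a 24-bit counter and must not exceed
              * HDCP_2_2_SEQ_NUM_MAX; bail out rather than letting it wrap.
              */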
1596         if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1597                 return -ERANGE;
1598
1599         /* Prepare RepeaterAuth_Stream_Manage msg */
1600         msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1601         drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1602
1603         msgs.stream_manage.k = cpu_to_be16(data->k);
1604
1605         for (i = 0; i < data->k; i++) {
1606                 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1607                 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1608         }
1609
1610         streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1611                                 sizeof(struct hdcp2_streamid_type);
1612         /* Send it to Repeater */
1613         ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1614                                   sizeof(msgs.stream_manage) - streams_size_delta);
1615         if (ret < 0)
1616                 goto out;
1617
1618         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1619                                  &msgs.stream_ready, sizeof(msgs.stream_ready));
1620         if (ret < 0)
1621                 goto out;
1622
1623         data->seq_num_m = hdcp->seq_num_m;
1624
1625         ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1626
1627 out:
1628         hdcp->seq_num_m++;
1629
1630         return ret;
1631 }
1632
1633 static
1634 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1635 {
1636         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1637         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1638         struct intel_hdcp *hdcp = &connector->hdcp;
1639         union {
1640                 struct hdcp2_rep_send_receiverid_list recvid_list;
1641                 struct hdcp2_rep_send_ack rep_ack;
1642         } msgs;
1643         const struct intel_hdcp_shim *shim = hdcp->shim;
1644         u32 seq_num_v, device_cnt;
1645         u8 *rx_info;
1646         int ret;
1647
1648         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1649                                  &msgs.recvid_list, sizeof(msgs.recvid_list));
1650         if (ret < 0)
1651                 return ret;
1652
1653         rx_info = msgs.recvid_list.rx_info;
1654
1655         if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1656             HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1657                 drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
1658                 return -EINVAL;
1659         }
1660
1661         /*
1662          * MST topology is not Type 1 capable if it contains a downstream
1663          * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
1664          */
1665         dig_port->hdcp_mst_type1_capable =
1666                 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
1667                 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
1668
1669         /* Convert the 24-bit big-endian seq_num_v and store it in a local u32 */
1670         seq_num_v =
1671                 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1672
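             /*
              * seq_num_v must be zero in the first RecvID_List after
              * authentication and must never decrease afterwards.
              */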
1673         if (!hdcp->hdcp2_encrypted && seq_num_v) {
1674                 drm_dbg_kms(&dev_priv->drm,
1675                             "Non zero Seq_num_v at first RecvId_List msg\n");
1676                 return -EINVAL;
1677         }
1678
1679         if (seq_num_v < hdcp->seq_num_v) {
1680                 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1681                 drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
1682                 return -EINVAL;
1683         }
1684
1685         device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1686                       HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1687         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1688                                         msgs.recvid_list.receiver_ids,
1689                                         device_cnt) > 0) {
1690                 drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
1691                 return -EPERM;
1692         }
1693
1694         ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1695                                                     &msgs.recvid_list,
1696                                                     &msgs.rep_ack);
1697         if (ret < 0)
1698                 return ret;
1699
1700         hdcp->seq_num_v = seq_num_v;
1701         ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
1702                                   sizeof(msgs.rep_ack));
1703         if (ret < 0)
1704                 return ret;
1705
1706         return 0;
1707 }
1708
1709 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1710 {
1711         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1712         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1713         struct intel_hdcp *hdcp = &connector->hdcp;
1714         const struct intel_hdcp_shim *shim = hdcp->shim;
1715         int ret;
1716
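             /*
              * Run AKE, the locality check and SKE in order; then, if the
              * sink is a repeater, authenticate its downstream topology.
              */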
1717         ret = hdcp2_authentication_key_exchange(connector);
1718         if (ret < 0) {
1719                 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1720                 return ret;
1721         }
1722
1723         ret = hdcp2_locality_check(connector);
1724         if (ret < 0) {
1725                 drm_dbg_kms(&i915->drm,
1726                             "Locality Check failed. Err : %d\n", ret);
1727                 return ret;
1728         }
1729
1730         ret = hdcp2_session_key_exchange(connector);
1731         if (ret < 0) {
1732                 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1733                 return ret;
1734         }
1735
1736         if (shim->config_stream_type) {
1737                 ret = shim->config_stream_type(dig_port,
1738                                                hdcp->is_repeater,
1739                                                hdcp->content_type);
1740                 if (ret < 0)
1741                         return ret;
1742         }
1743
1744         if (hdcp->is_repeater) {
1745                 ret = hdcp2_authenticate_repeater_topology(connector);
1746                 if (ret < 0) {
1747                         drm_dbg_kms(&i915->drm,
1748                                     "Repeater Auth Failed. Err: %d\n", ret);
1749                         return ret;
1750                 }
1751         }
1752
1753         return ret;
1754 }
1755
1756 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1757 {
1758         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1759         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1760         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1761         struct intel_hdcp *hdcp = &connector->hdcp;
1762         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1763         enum port port = dig_port->base.port;
1764         int ret = 0;
1765
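             /*
              * Stream encryption can only be enabled once port-level link
              * encryption is up; otherwise tear down the port authentication.
              */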
1766         if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1767                             LINK_ENCRYPTION_STATUS)) {
1768                 drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1769                         connector->base.name, connector->base.base.id);
1770                 ret = -EPERM;
1771                 goto link_recover;
1772         }
1773
1774         if (hdcp->shim->stream_2_2_encryption) {
1775                 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1776                 if (ret) {
1777                         drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1778                                 connector->base.name, connector->base.base.id);
1779                         return ret;
1780                 }
1781                 drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1782                             transcoder_name(hdcp->stream_transcoder));
1783         }
1784
1785         return 0;
1786
1787 link_recover:
1788         if (hdcp2_deauthenticate_port(connector) < 0)
1789                 drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
1790
1791         dig_port->hdcp_auth_status = false;
1792         data->k = 0;
1793
1794         return ret;
1795 }
1796
1797 static int hdcp2_enable_encryption(struct intel_connector *connector)
1798 {
1799         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1800         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1801         struct intel_hdcp *hdcp = &connector->hdcp;
1802         enum port port = dig_port->base.port;
1803         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1804         int ret;
1805
1806         drm_WARN_ON(&dev_priv->drm,
1807                     intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1808                     LINK_ENCRYPTION_STATUS);
1809         if (hdcp->shim->toggle_signalling) {
1810                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1811                                                     true);
1812                 if (ret) {
1813                         drm_err(&dev_priv->drm,
1814                                 "Failed to enable HDCP signalling. %d\n",
1815                                 ret);
1816                         return ret;
1817                 }
1818         }
1819
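             /*
              * Request link encryption only once the hardware reports the
              * link as authenticated, then wait for the encryption status bit.
              */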
1820         if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1821             LINK_AUTH_STATUS)
1822                 /* Link is authenticated, now request link encryption */
1823                 intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
1824                              0, CTL_LINK_ENCRYPTION_REQ);
1825
1826         ret = intel_de_wait_for_set(dev_priv,
1827                                     HDCP2_STATUS(dev_priv, cpu_transcoder,
1828                                                  port),
1829                                     LINK_ENCRYPTION_STATUS,
1830                                     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1831         dig_port->hdcp_auth_status = true;
1832
1833         return ret;
1834 }
1835
1836 static int hdcp2_disable_encryption(struct intel_connector *connector)
1837 {
1838         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1839         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1840         struct intel_hdcp *hdcp = &connector->hdcp;
1841         enum port port = dig_port->base.port;
1842         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1843         int ret;
1844
1845         drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1846                                       LINK_ENCRYPTION_STATUS));
1847
1848         intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
1849                      CTL_LINK_ENCRYPTION_REQ, 0);
1850
1851         ret = intel_de_wait_for_clear(dev_priv,
1852                                       HDCP2_STATUS(dev_priv, cpu_transcoder,
1853                                                    port),
1854                                       LINK_ENCRYPTION_STATUS,
1855                                       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1856         if (ret == -ETIMEDOUT)
1857                 drm_dbg_kms(&dev_priv->drm, "Disable encryption timed out");
1858
1859         if (hdcp->shim->toggle_signalling) {
1860                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1861                                                     false);
1862                 if (ret) {
1863                         drm_err(&dev_priv->drm,
1864                                 "Failed to disable HDCP signalling. %d\n",
1865                                 ret);
1866                         return ret;
1867                 }
1868         }
1869
1870         return ret;
1871 }
1872
1873 static int
1874 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1875 {
1876         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1877         int i, tries = 3, ret;
1878
1879         if (!connector->hdcp.is_repeater)
1880                 return 0;
1881
1882         for (i = 0; i < tries; i++) {
1883                 ret = _hdcp2_propagate_stream_management_info(connector);
1884                 if (!ret)
1885                         break;
1886
1887                 /* Let's restart the auth in case of seq_num_m roll over */
1888                 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1889                         drm_dbg_kms(&i915->drm,
1890                                     "seq_num_m roll over.(%d)\n", ret);
1891                         break;
1892                 }
1893
1894                 drm_dbg_kms(&i915->drm,
1895                             "HDCP2 stream management %d of %d Failed.(%d)\n",
1896                             i + 1, tries, ret);
1897         }
1898
1899         return ret;
1900 }
1901
1902 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1903 {
1904         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1905         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1906         int ret = 0, i, tries = 3;
1907
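             /*
              * Retry the full authentication up to three times; a failed sink
              * or port authentication clears the mei session before retrying.
              */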
1908         for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1909                 ret = hdcp2_authenticate_sink(connector);
1910                 if (!ret) {
1911                         ret = intel_hdcp_prepare_streams(connector);
1912                         if (ret) {
1913                                 drm_dbg_kms(&i915->drm,
1914                                             "Prepare streams failed.(%d)\n",
1915                                             ret);
1916                                 break;
1917                         }
1918
1919                         ret = hdcp2_propagate_stream_management_info(connector);
1920                         if (ret) {
1921                                 drm_dbg_kms(&i915->drm,
1922                                             "Stream management failed.(%d)\n",
1923                                             ret);
1924                                 break;
1925                         }
1926
1927                         ret = hdcp2_authenticate_port(connector);
1928                         if (!ret)
1929                                 break;
1930                         drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1931                                     ret);
1932                 }
1933
1934                 /* Clearing the mei hdcp session */
1935                 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1936                             i + 1, tries, ret);
1937                 if (hdcp2_deauthenticate_port(connector) < 0)
1938                         drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1939         }
1940
1941         if (!ret && !dig_port->hdcp_auth_status) {
1942                 /*
1943                  * Ensure the required 200 ms minimum interval between
1944                  * Session Key Exchange and enabling encryption.
1945                  */
1946                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1947                 ret = hdcp2_enable_encryption(connector);
1948                 if (ret < 0) {
1949                         drm_dbg_kms(&i915->drm,
1950                                     "Encryption Enable Failed.(%d)\n", ret);
1951                         if (hdcp2_deauthenticate_port(connector) < 0)
1952                                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1953                 }
1954         }
1955
1956         if (!ret)
1957                 ret = hdcp2_enable_stream_encryption(connector);
1958
1959         return ret;
1960 }
1961
1962 static int _intel_hdcp2_enable(struct intel_connector *connector)
1963 {
1964         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1965         struct intel_hdcp *hdcp = &connector->hdcp;
1966         int ret;
1967
1968         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1969                     connector->base.name, connector->base.base.id,
1970                     hdcp->content_type);
1971
1972         ret = hdcp2_authenticate_and_encrypt(connector);
1973         if (ret) {
1974                 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
1975                             hdcp->content_type, ret);
1976                 return ret;
1977         }
1978
1979         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1980                     connector->base.name, connector->base.base.id,
1981                     hdcp->content_type);
1982
1983         hdcp->hdcp2_encrypted = true;
1984         return 0;
1985 }
1986
1987 static int
1988 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1989 {
1990         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1991         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1992         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1993         struct intel_hdcp *hdcp = &connector->hdcp;
1994         int ret;
1995
1996         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1997                     connector->base.name, connector->base.base.id);
1998
1999         if (hdcp->shim->stream_2_2_encryption) {
2000                 ret = hdcp->shim->stream_2_2_encryption(connector, false);
2001                 if (ret) {
2002                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
2003                                 connector->base.name, connector->base.base.id);
2004                         return ret;
2005                 }
2006                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
2007                             transcoder_name(hdcp->stream_transcoder));
2008
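                     /*
                      * Other streams on this port may still be encrypted; keep
                      * the port authenticated unless this is full link recovery.
                      */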
2009                 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
2010                         return 0;
2011         }
2012
2013         ret = hdcp2_disable_encryption(connector);
2014
2015         if (hdcp2_deauthenticate_port(connector) < 0)
2016                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
2017
2018         connector->hdcp.hdcp2_encrypted = false;
2019         dig_port->hdcp_auth_status = false;
2020         data->k = 0;
2021
2022         return ret;
2023 }
2024
2025 /* Implements the Link Integrity Check for HDCP2.2 */
2026 static int intel_hdcp2_check_link(struct intel_connector *connector)
2027 {
2028         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2029         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2030         struct intel_hdcp *hdcp = &connector->hdcp;
2031         enum port port = dig_port->base.port;
2032         enum transcoder cpu_transcoder;
2033         int ret = 0;
2034
2035         mutex_lock(&hdcp->mutex);
2036         mutex_lock(&dig_port->hdcp_mutex);
2037         cpu_transcoder = hdcp->cpu_transcoder;
2038
2039         /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2040         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2041             !hdcp->hdcp2_encrypted) {
2042                 ret = -EINVAL;
2043                 goto out;
2044         }
2045
2046         if (drm_WARN_ON(&dev_priv->drm,
2047                         !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
2048                 drm_err(&dev_priv->drm,
2049                         "HDCP2.2 link stopped the encryption, %x\n",
2050                         intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
2051                 ret = -ENXIO;
2052                 _intel_hdcp2_disable(connector, true);
2053                 intel_hdcp_update_value(connector,
2054                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2055                                         true);
2056                 goto out;
2057         }
2058
2059         ret = hdcp->shim->check_2_2_link(dig_port, connector);
2060         if (ret == HDCP_LINK_PROTECTED) {
2061                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2062                         intel_hdcp_update_value(connector,
2063                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2064                                         true);
2065                 }
2066                 goto out;
2067         }
2068
2069         if (ret == HDCP_TOPOLOGY_CHANGE) {
2070                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2071                         goto out;
2072
2073                 drm_dbg_kms(&dev_priv->drm,
2074                             "HDCP2.2 Downstream topology change\n");
2075                 ret = hdcp2_authenticate_repeater_topology(connector);
2076                 if (!ret) {
2077                         intel_hdcp_update_value(connector,
2078                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2079                                         true);
2080                         goto out;
2081                 }
2082                 drm_dbg_kms(&dev_priv->drm,
2083                             "[%s:%d] Repeater topology auth failed.(%d)\n",
2084                             connector->base.name, connector->base.base.id,
2085                             ret);
2086         } else {
2087                 drm_dbg_kms(&dev_priv->drm,
2088                             "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2089                             connector->base.name, connector->base.base.id);
2090         }
2091
2092         ret = _intel_hdcp2_disable(connector, true);
2093         if (ret) {
2094                 drm_err(&dev_priv->drm,
2095                         "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2096                         connector->base.name, connector->base.base.id, ret);
2097                 intel_hdcp_update_value(connector,
2098                                 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2099                 goto out;
2100         }
2101
2102         ret = _intel_hdcp2_enable(connector);
2103         if (ret) {
2104                 drm_dbg_kms(&dev_priv->drm,
2105                             "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2106                             connector->base.name, connector->base.base.id,
2107                             ret);
2108                 intel_hdcp_update_value(connector,
2109                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2110                                         true);
2111                 goto out;
2112         }
2113
2114 out:
2115         mutex_unlock(&dig_port->hdcp_mutex);
2116         mutex_unlock(&hdcp->mutex);
2117         return ret;
2118 }
2119
2120 static void intel_hdcp_check_work(struct work_struct *work)
2121 {
2122         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2123                                                struct intel_hdcp,
2124                                                check_work);
2125         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2126
2127         if (drm_connector_is_unregistered(&connector->base))
2128                 return;
2129
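             /*
              * If the HDCP 2.2 link check handled the link, re-arm at the 2.2
              * cadence; otherwise fall back to the HDCP 1.4 check and period.
              */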
2130         if (!intel_hdcp2_check_link(connector))
2131                 schedule_delayed_work(&hdcp->check_work,
2132                                       DRM_HDCP2_CHECK_PERIOD_MS);
2133         else if (!intel_hdcp_check_link(connector))
2134                 schedule_delayed_work(&hdcp->check_work,
2135                                       DRM_HDCP_CHECK_PERIOD_MS);
2136 }
2137
2138 static int i915_hdcp_component_bind(struct device *i915_kdev,
2139                                     struct device *mei_kdev, void *data)
2140 {
2141         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2142
2143         drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2144         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2145         dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
2146         dev_priv->display.hdcp.master->mei_dev = mei_kdev;
2147         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2148
2149         return 0;
2150 }
2151
2152 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2153                                        struct device *mei_kdev, void *data)
2154 {
2155         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2156
2157         drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2158         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2159         dev_priv->display.hdcp.master = NULL;
2160         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2161 }
2162
2163 static const struct component_ops i915_hdcp_component_ops = {
2164         .bind   = i915_hdcp_component_bind,
2165         .unbind = i915_hdcp_component_unbind,
2166 };
2167
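     /*
      * Map an i915 port to the ME FW DDI enum: DDI A has its own firmware
      * value, DDI B..F are passed through with a direct cast, and anything
      * else is reported as an invalid port.
      */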
2168 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2169 {
2170         switch (port) {
2171         case PORT_A:
2172                 return MEI_DDI_A;
2173         case PORT_B ... PORT_F:
2174                 return (enum mei_fw_ddi)port;
2175         default:
2176                 return MEI_DDI_INVALID_PORT;
2177         }
2178 }
2179
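     /* Transcoders A..D map onto the ME FW transcoder enum by OR-ing in 0x10 */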
2180 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2181 {
2182         switch (cpu_transcoder) {
2183         case TRANSCODER_A ... TRANSCODER_D:
2184                 return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2185         default: /* eDP and DSI transcoders are not HDCP capable */
2186                 return MEI_INVALID_TRANSCODER;
2187         }
2188 }
2189
2190 static int initialize_hdcp_port_data(struct intel_connector *connector,
2191                                      struct intel_digital_port *dig_port,
2192                                      const struct intel_hdcp_shim *shim)
2193 {
2194         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2195         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2196         struct intel_hdcp *hdcp = &connector->hdcp;
2197         enum port port = dig_port->base.port;
2198
2199         if (DISPLAY_VER(dev_priv) < 12)
2200                 data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2201         else
2202                 /*
2203                  * As per the ME FW API expectation, for GEN 12+ fw_ddi is
2204                  * filled with zero (invalid port index).
2205                  */
2206                 data->fw_ddi = MEI_DDI_INVALID_PORT;
2207
2208         /*
2209          * The associated transcoder is set and modified at modeset time, so
2210          * fw_tc is initialized here to zero (invalid transcoder index). On
2211          * pre-GEN 12 platforms it keeps this value forever.
2212          */
2213         data->fw_tc = MEI_INVALID_TRANSCODER;
2214
2215         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2216         data->protocol = (u8)shim->protocol;
2217
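             /*
              * Allocate one stream entry per pipe, enough to describe every
              * stream this port can drive.
              */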
2218         if (!data->streams)
2219                 data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2220                                         sizeof(struct hdcp2_streamid_type),
2221                                         GFP_KERNEL);
2222         if (!data->streams) {
2223                 drm_err(&dev_priv->drm, "Out of Memory\n");
2224                 return -ENOMEM;
2225         }
2226         /* For SST */
2227         data->streams[0].stream_id = 0;
2228         data->streams[0].stream_type = hdcp->content_type;
2229
2230         return 0;
2231 }
2232
2233 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2234 {
2235         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2236                 return false;
2237
2238         return (DISPLAY_VER(dev_priv) >= 10 ||
2239                 IS_KABYLAKE(dev_priv) ||
2240                 IS_COFFEELAKE(dev_priv) ||
2241                 IS_COMETLAKE(dev_priv));
2242 }
2243
2244 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2245 {
2246         int ret;
2247
2248         if (!is_hdcp2_supported(dev_priv))
2249                 return;
2250
2251         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2252         drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
2253
2254         dev_priv->display.hdcp.comp_added = true;
2255         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2256         ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2257                                   I915_COMPONENT_HDCP);
2258         if (ret < 0) {
2259                 drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2260                             ret);
2261                 mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2262                 dev_priv->display.hdcp.comp_added = false;
2263                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2264                 return;
2265         }
2266 }
2267
2268 static void intel_hdcp2_init(struct intel_connector *connector,
2269                              struct intel_digital_port *dig_port,
2270                              const struct intel_hdcp_shim *shim)
2271 {
2272         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2273         struct intel_hdcp *hdcp = &connector->hdcp;
2274         int ret;
2275
2276         ret = initialize_hdcp_port_data(connector, dig_port, shim);
2277         if (ret) {
2278                 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2279                 return;
2280         }
2281
2282         hdcp->hdcp2_supported = true;
2283 }
2284
2285 int intel_hdcp_init(struct intel_connector *connector,
2286                     struct intel_digital_port *dig_port,
2287                     const struct intel_hdcp_shim *shim)
2288 {
2289         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2290         struct intel_hdcp *hdcp = &connector->hdcp;
2291         int ret;
2292
2293         if (!shim)
2294                 return -EINVAL;
2295
2296         if (is_hdcp2_supported(dev_priv))
2297                 intel_hdcp2_init(connector, dig_port, shim);
2298
2299         ret =
2300         drm_connector_attach_content_protection_property(&connector->base,
2301                                                          hdcp->hdcp2_supported);
2302         if (ret) {
2303                 hdcp->hdcp2_supported = false;
2304                 kfree(dig_port->hdcp_port_data.streams);
2305                 return ret;
2306         }
2307
2308         hdcp->shim = shim;
2309         mutex_init(&hdcp->mutex);
2310         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2311         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2312         init_waitqueue_head(&hdcp->cp_irq_queue);
2313
2314         return 0;
2315 }
2316
2317 int intel_hdcp_enable(struct intel_connector *connector,
2318                       const struct intel_crtc_state *pipe_config, u8 content_type)
2319 {
2320         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2321         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2322         struct intel_hdcp *hdcp = &connector->hdcp;
2323         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2324         int ret = -EINVAL;
2325
2326         if (!hdcp->shim)
2327                 return -ENOENT;
2328
2329         if (!connector->encoder) {
2330                 drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
2331                         connector->base.name, connector->base.base.id);
2332                 return -ENODEV;
2333         }
2334
2335         mutex_lock(&hdcp->mutex);
2336         mutex_lock(&dig_port->hdcp_mutex);
2337         drm_WARN_ON(&dev_priv->drm,
2338                     hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2339         hdcp->content_type = content_type;
2340
2341         if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2342                 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2343                 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2344         } else {
2345                 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2346                 hdcp->stream_transcoder = INVALID_TRANSCODER;
2347         }
2348
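             /*
              * For GEN 12+ the ME FW addresses HDCP per transcoder, so refresh
              * fw_tc from the transcoder selected above.
              */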
2349         if (DISPLAY_VER(dev_priv) >= 12)
2350                 dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
2351
2352         /*
2353          * HDCP 2.2 is considered more secure than HDCP 1.4, so if the setup
2354          * is HDCP 2.2 capable, prefer HDCP 2.2.
2355          */
2356         if (intel_hdcp2_capable(connector)) {
2357                 ret = _intel_hdcp2_enable(connector);
2358                 if (!ret)
2359                         check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
2360         }
2361
2362         /*
2363          * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2364          * be attempted.
2365          */
2366         if (ret && intel_hdcp_capable(connector) &&
2367             hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2368                 ret = _intel_hdcp_enable(connector);
2369         }
2370
2371         if (!ret) {
2372                 schedule_delayed_work(&hdcp->check_work, check_link_interval);
2373                 intel_hdcp_update_value(connector,
2374                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2375                                         true);
2376         }
2377
2378         mutex_unlock(&dig_port->hdcp_mutex);
2379         mutex_unlock(&hdcp->mutex);
2380         return ret;
2381 }
2382
2383 int intel_hdcp_disable(struct intel_connector *connector)
2384 {
2385         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2386         struct intel_hdcp *hdcp = &connector->hdcp;
2387         int ret = 0;
2388
2389         if (!hdcp->shim)
2390                 return -ENOENT;
2391
2392         mutex_lock(&hdcp->mutex);
2393         mutex_lock(&dig_port->hdcp_mutex);
2394
2395         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2396                 goto out;
2397
2398         intel_hdcp_update_value(connector,
2399                                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2400         if (hdcp->hdcp2_encrypted)
2401                 ret = _intel_hdcp2_disable(connector, false);
2402         else if (hdcp->hdcp_encrypted)
2403                 ret = _intel_hdcp_disable(connector);
2404
2405 out:
2406         mutex_unlock(&dig_port->hdcp_mutex);
2407         mutex_unlock(&hdcp->mutex);
2408         cancel_delayed_work_sync(&hdcp->check_work);
2409         return ret;
2410 }
2411
2412 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2413                             struct intel_encoder *encoder,
2414                             const struct intel_crtc_state *crtc_state,
2415                             const struct drm_connector_state *conn_state)
2416 {
2417         struct intel_connector *connector =
2418                                 to_intel_connector(conn_state->connector);
2419         struct intel_hdcp *hdcp = &connector->hdcp;
2420         bool content_protection_type_changed, desired_and_not_enabled = false;
2421
2422         if (!connector->hdcp.shim)
2423                 return;
2424
2425         content_protection_type_changed =
2426                 (conn_state->hdcp_content_type != hdcp->content_type &&
2427                  conn_state->content_protection !=
2428                  DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2429
2430         /*
2431          * If a content type change is requested during an active HDCP
2432          * encryption session, disable HDCP and re-enable it with the new type.
2433          */
2434         if (conn_state->content_protection ==
2435             DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2436             content_protection_type_changed)
2437                 intel_hdcp_disable(connector);
2438
2439         /*
2440          * Mark the HDCP state as DESIRED after HDCP has been disabled as
2441          * part of the type change procedure.
2442          */
2443         if (content_protection_type_changed) {
2444                 mutex_lock(&hdcp->mutex);
2445                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2446                 drm_connector_get(&connector->base);
2447                 schedule_work(&hdcp->prop_work);
2448                 mutex_unlock(&hdcp->mutex);
2449         }
2450
2451         if (conn_state->content_protection ==
2452             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2453                 mutex_lock(&hdcp->mutex);
2454                 /* Avoid enabling HDCP if it is already ENABLED */
2455                 desired_and_not_enabled =
2456                         hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2457                 mutex_unlock(&hdcp->mutex);
2458                 /*
2459                  * If HDCP is already ENABLED and the CP property is DESIRED,
2460                  * schedule prop_work to report the correct CP property to user space.
2461                  */
2462                 if (!desired_and_not_enabled && !content_protection_type_changed) {
2463                         drm_connector_get(&connector->base);
2464                         schedule_work(&hdcp->prop_work);
2465                 }
2466         }
2467
2468         if (desired_and_not_enabled || content_protection_type_changed)
2469                 intel_hdcp_enable(connector,
2470                                   crtc_state,
2471                                   (u8)conn_state->hdcp_content_type);
2472 }
2473
2474 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2475 {
2476         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2477         if (!dev_priv->display.hdcp.comp_added) {
2478                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2479                 return;
2480         }
2481
2482         dev_priv->display.hdcp.comp_added = false;
2483         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2484
2485         component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2486 }
2487
2488 void intel_hdcp_cleanup(struct intel_connector *connector)
2489 {
2490         struct intel_hdcp *hdcp = &connector->hdcp;
2491
2492         if (!hdcp->shim)
2493                 return;
2494
2495         /*
2496          * If the connector is registered, it's possible userspace could kick
2497          * off another HDCP enable, which would re-spawn the workers.
2498          */
2499         drm_WARN_ON(connector->base.dev,
2500                 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2501
2502         /*
2503          * Now that the connector is not registered, check_work won't be run,
2504          * but cancel any outstanding instances of it
2505          */
2506         cancel_delayed_work_sync(&hdcp->check_work);
2507
2508         /*
2509          * We don't cancel prop_work in the same way as check_work since it
2510          * requires connection_mutex which could be held while calling this
2511          * function. Instead, we rely on the connector references grabbed before
2512          * scheduling prop_work to ensure the connector is alive when prop_work
2513          * is run. So if we're in the destroy path (which is where this
2514          * function should be called), we're "guaranteed" that prop_work is not
2515          * active (tl;dr This Should Never Happen).
2516          */
2517         drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2518
2519         mutex_lock(&hdcp->mutex);
2520         hdcp->shim = NULL;
2521         mutex_unlock(&hdcp->mutex);
2522 }
2523
2524 void intel_hdcp_atomic_check(struct drm_connector *connector,
2525                              struct drm_connector_state *old_state,
2526                              struct drm_connector_state *new_state)
2527 {
2528         u64 old_cp = old_state->content_protection;
2529         u64 new_cp = new_state->content_protection;
2530         struct drm_crtc_state *crtc_state;
2531
2532         if (!new_state->crtc) {
2533                 /*
2534                  * If the connector is being disabled with CP enabled, mark it
2535                  * desired so it's re-enabled when the connector is brought back
2536                  */
2537                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2538                         new_state->content_protection =
2539                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2540                 return;
2541         }
2542
2543         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2544                                                    new_state->crtc);
2545         /*
2546          * Fix the HDCP uapi content protection state in case of modeset.
2547          * FIXME: As per the HDCP content protection property uapi doc, a uevent()
2548          * needs to be sent if there is a transition from ENABLED->DESIRED.
2549          */
2550         if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2551             (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2552             new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2553                 new_state->content_protection =
2554                         DRM_MODE_CONTENT_PROTECTION_DESIRED;
2555
2556         /*
2557          * Nothing to do if the state didn't change, or if HDCP was activated
2558          * since the last commit, provided the HDCP content type also didn't change.
2559          */
2560         if (old_cp == new_cp ||
2561             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2562              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2563                 if (old_state->hdcp_content_type ==
2564                                 new_state->hdcp_content_type)
2565                         return;
2566         }
2567
2568         crtc_state->mode_changed = true;
2569 }
2570
2571 /* Handles the CP_IRQ raised from the DP HDCP sink */
2572 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2573 {
2574         struct intel_hdcp *hdcp = &connector->hdcp;
2575
2576         if (!hdcp->shim)
2577                 return;
2578
2579         atomic_inc(&connector->hdcp.cp_irq_count);
2580         wake_up_all(&connector->hdcp.cp_irq_queue);
2581
2582         schedule_delayed_work(&hdcp->check_work, 0);
2583 }