drivers/gpu/drm/i915/display/intel_hdcp.c
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright © 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_display_power.h"
21 #include "intel_display_types.h"
22 #include "intel_hdcp.h"
23 #include "intel_sideband.h"
24 #include "intel_connector.h"
25
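/*
 * KEY_LOAD_TRIES bounds how often we retry loading the HDCP 1.4 keys before
 * giving up; HDCP2_LC_RETRY_CNT bounds the HDCP 2.2 locality-check attempts
 * made later in this file.
 */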
26 #define KEY_LOAD_TRIES  5
27 #define HDCP2_LC_RETRY_CNT                      3
28
29 static int intel_conn_to_vcpi(struct intel_connector *connector)
30 {
31         /* For HDMI this is forced to be 0x0. For DP SST it is also 0x0. */
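        /*
         * The VCPI (Virtual Channel Payload ID) identifies the stream within
         * a DP MST topology; it is used as the stream_id handed to the
         * security firmware in intel_hdcp_required_content_stream().
         */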
32         return connector->port  ? connector->port->vcpi.vcpi : 0;
33 }
34
35 static bool
36 intel_streams_type1_capable(struct intel_connector *connector)
37 {
38         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
39         bool capable = false;
40
41         if (!shim)
42                 return capable;
43
44         if (shim->streams_type1_capable)
45                 shim->streams_type1_capable(connector, &capable);
46
47         return capable;
48 }
49
50 /*
51  * intel_hdcp_required_content_stream selects the highest common HDCP
52  * content_type supported by all streams in a DP MST topology: the security
53  * f/w has no provision to mark a content_type per stream, it marks all
54  * available streams with the content_type provided at the time of port
55  * authentication. This may prevent userspace from using type1 content on an
56  * HDCP 2.2 capable sink when other sinks in the DP MST topology are not
57  * HDCP 2.2 capable. Though not compulsory, the security fw should ideally
58  * mark different content_types for different streams.
59  */
60 static int
61 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
62 {
63         struct drm_connector_list_iter conn_iter;
64         struct intel_digital_port *conn_dig_port;
65         struct intel_connector *connector;
66         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
67         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
68         bool enforce_type0 = false;
69         int k;
70
71         data->k = 0;
72
73         if (dig_port->hdcp_auth_status)
74                 return 0;
75
76         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
77         for_each_intel_connector_iter(connector, &conn_iter) {
78                 if (connector->base.status == connector_status_disconnected)
79                         continue;
80
81                 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
82                         continue;
83
84                 conn_dig_port = intel_attached_dig_port(connector);
85                 if (conn_dig_port != dig_port)
86                         continue;
87
88                 if (!enforce_type0 && !intel_streams_type1_capable(connector))
89                         enforce_type0 = true;
90
91                 data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
92                 data->k++;
93
94                 /* if there is only one active stream */
95                 if (dig_port->dp.active_mst_links <= 1)
96                         break;
97         }
98         drm_connector_list_iter_end(&conn_iter);
99
100         if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
101                 return -EINVAL;
102
103         /*
104          * Apply common protection level across all streams in DP MST Topology.
105          * Use highest supported content type for all streams in DP MST Topology.
106          */
107         for (k = 0; k < data->k; k++)
108                 data->streams[k].stream_type =
109                         enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
110
111         return 0;
112 }
113
114 static
115 bool intel_hdcp_is_ksv_valid(u8 *ksv)
116 {
117         int i, ones = 0;
118         /* KSV has 20 1's and 20 0's */
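        /*
         * hweight8() returns the number of set bits in a byte, so the sum
         * over the five KSV bytes must be exactly 20 ones.
         */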
119         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
120                 ones += hweight8(ksv[i]);
121         if (ones != 20)
122                 return false;
123
124         return true;
125 }
126
127 static
128 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
129                                const struct intel_hdcp_shim *shim, u8 *bksv)
130 {
131         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
132         int ret, i, tries = 2;
133
134         /* HDCP spec states that we must retry the bksv if it is invalid */
135         for (i = 0; i < tries; i++) {
136                 ret = shim->read_bksv(dig_port, bksv);
137                 if (ret)
138                         return ret;
139                 if (intel_hdcp_is_ksv_valid(bksv))
140                         break;
141         }
142         if (i == tries) {
143                 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
144                 return -ENODEV;
145         }
146
147         return 0;
148 }
149
150 /* Is HDCP1.4 capable on Platform and Sink */
151 bool intel_hdcp_capable(struct intel_connector *connector)
152 {
153         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
154         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
155         bool capable = false;
156         u8 bksv[5];
157
158         if (!shim)
159                 return capable;
160
161         if (shim->hdcp_capable) {
162                 shim->hdcp_capable(dig_port, &capable);
163         } else {
164                 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
165                         capable = true;
166         }
167
168         return capable;
169 }
170
171 /* Is HDCP2.2 capable on Platform and Sink */
172 bool intel_hdcp2_capable(struct intel_connector *connector)
173 {
174         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
175         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
176         struct intel_hdcp *hdcp = &connector->hdcp;
177         bool capable = false;
178
179         /* I915 support for HDCP2.2 */
180         if (!hdcp->hdcp2_supported)
181                 return false;
182
183         /* MEI component interface must be bound and usable */
184         mutex_lock(&dev_priv->hdcp_comp_mutex);
185         if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
186                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
187                 return false;
188         }
189         mutex_unlock(&dev_priv->hdcp_comp_mutex);
190
191         /* Sink's capability for HDCP2.2 */
192         hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
193
194         return capable;
195 }
196
197 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
198                               enum transcoder cpu_transcoder, enum port port)
199 {
200         return intel_de_read(dev_priv,
201                              HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
202                HDCP_STATUS_ENC;
203 }
204
205 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
206                                enum transcoder cpu_transcoder, enum port port)
207 {
208         return intel_de_read(dev_priv,
209                              HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
210                LINK_ENCRYPTION_STATUS;
211 }
212
213 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
214                                     const struct intel_hdcp_shim *shim)
215 {
216         int ret, read_ret;
217         bool ksv_ready;
218
219         /* Poll for ksv list ready (spec says max time allowed is 5s) */
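        /*
         * The __wait_for() timeout arguments are in microseconds: 5 s total,
         * with the poll interval backing off from 1 ms up to 100 ms between
         * reads of the KSV-ready bit.
         */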
220         ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
221                                                          &ksv_ready),
222                          read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
223                          100 * 1000);
224         if (ret)
225                 return ret;
226         if (read_ret)
227                 return read_ret;
228         if (!ksv_ready)
229                 return -ETIMEDOUT;
230
231         return 0;
232 }
233
234 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
235 {
236         enum i915_power_well_id id;
237         intel_wakeref_t wakeref;
238         bool enabled = false;
239
240         /*
241          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
242          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
243          */
244         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
245                 id = HSW_DISP_PW_GLOBAL;
246         else
247                 id = SKL_DISP_PW_1;
248
249         /* PG1 (power well #1) needs to be enabled */
250         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
251                 enabled = intel_display_power_well_is_enabled(dev_priv, id);
252
253         /*
254          * Another requirement for HDCP key loadability is that the CDCLK
255          * PLL is enabled. Without an active CRTC we won't land here, so we
256          * assume that CDCLK is already on.
257          */
258
259         return enabled;
260 }
261
262 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
263 {
264         intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
265         intel_de_write(dev_priv, HDCP_KEY_STATUS,
266                        HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
267 }
268
269 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
270 {
271         int ret;
272         u32 val;
273
274         val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
275         if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
276                 return 0;
277
278         /*
279          * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
280          * out of reset, so if the key is not already loaded it's an error state.
281          */
282         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
283                 if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
284                         return -ENXIO;
285
286         /*
287          * Initiate loading the HDCP key from fuses.
288          *
289          * On BXT+ platforms the HDCP key must be loaded by SW. Of the Gen 9
290          * platforms, only GEN9_BC (i.e. excluding BXT and GLK) differs in the
291          * key load trigger process: it uses the GT Driver Mailbox i/f.
292          */
293         if (IS_GEN9_BC(dev_priv)) {
294                 ret = sandybridge_pcode_write(dev_priv,
295                                               SKL_PCODE_LOAD_HDCP_KEYS, 1);
296                 if (ret) {
297                         drm_err(&dev_priv->drm,
298                                 "Failed to initiate HDCP key load (%d)\n",
299                                 ret);
300                         return ret;
301                 }
302         } else {
303                 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
304         }
305
306         /* Wait for the keys to load (500us) */
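        /*
         * __intel_wait_for_register() polls HDCP_KEY_STATUS for the
         * HDCP_KEY_LOAD_DONE bit (10 us fast poll, then up to 1 ms sleeping)
         * and returns the final register value in val.
         */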
307         ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
308                                         HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
309                                         10, 1, &val);
310         if (ret)
311                 return ret;
312         else if (!(val & HDCP_KEY_LOAD_STATUS))
313                 return -ENXIO;
314
315         /* Send Aksv over to PCH display for use in authentication */
316         intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
317
318         return 0;
319 }
320
321 /* Writes 32 bits of the SHA-1 byte stream and waits for the HW to accept it */
322 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
323 {
324         intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
325         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
326                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
327                 return -ETIMEDOUT;
328         }
329         return 0;
330 }
331
332 static
333 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
334                                 enum transcoder cpu_transcoder, enum port port)
335 {
336         if (DISPLAY_VER(dev_priv) >= 12) {
337                 switch (cpu_transcoder) {
338                 case TRANSCODER_A:
339                         return HDCP_TRANSA_REP_PRESENT |
340                                HDCP_TRANSA_SHA1_M0;
341                 case TRANSCODER_B:
342                         return HDCP_TRANSB_REP_PRESENT |
343                                HDCP_TRANSB_SHA1_M0;
344                 case TRANSCODER_C:
345                         return HDCP_TRANSC_REP_PRESENT |
346                                HDCP_TRANSC_SHA1_M0;
347                 case TRANSCODER_D:
348                         return HDCP_TRANSD_REP_PRESENT |
349                                HDCP_TRANSD_SHA1_M0;
350                 default:
351                         drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
352                                 cpu_transcoder);
353                         return -EINVAL;
354                 }
355         }
356
357         switch (port) {
358         case PORT_A:
359                 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
360         case PORT_B:
361                 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
362         case PORT_C:
363                 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
364         case PORT_D:
365                 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
366         case PORT_E:
367                 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
368         default:
369                 drm_err(&dev_priv->drm, "Unknown port %d\n", port);
370                 return -EINVAL;
371         }
372 }
373
374 static
375 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
376                                 const struct intel_hdcp_shim *shim,
377                                 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
378 {
379         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
380         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
381         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
382         enum port port = dig_port->base.port;
383         u32 vprime, sha_text, sha_leftovers, rep_ctl;
384         int ret, i, j, sha_idx;
385
386         /* Process V' values from the receiver */
387         for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
388                 ret = shim->read_v_prime_part(dig_port, i, &vprime);
389                 if (ret)
390                         return ret;
391                 intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
392         }
393
394         /*
395          * We need to write the concatenation of all device KSVs, BINFO (DP) ||
396          * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
397          * stream is written via the HDCP_SHA_TEXT register in 32-bit
398          * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
399          * index will keep track of our progress through the 64 bytes as well as
400          * helping us work the 40-bit KSVs through our 32-bit register.
401          *
402          * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
403          */
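        /*
         * Example of the packing below: with no leftover bytes, the first
         * iteration writes ksv[0..3] big-endian as one 32-bit word and carries
         * ksv[4] in the top byte of sha_text for the next iteration.
         */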
404         sha_idx = 0;
405         sha_text = 0;
406         sha_leftovers = 0;
407         rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
408         intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
409         for (i = 0; i < num_downstream; i++) {
410                 unsigned int sha_empty;
411                 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
412
413                 /* Fill up the empty slots in sha_text and write it out */
414                 sha_empty = sizeof(sha_text) - sha_leftovers;
415                 for (j = 0; j < sha_empty; j++) {
416                         u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
417                         sha_text |= ksv[j] << off;
418                 }
419
420                 ret = intel_write_sha_text(dev_priv, sha_text);
421                 if (ret < 0)
422                         return ret;
423
424                 /* The programming guide says to write HDCP_REP_CTL every 64 bytes */
425                 sha_idx += sizeof(sha_text);
426                 if (!(sha_idx % 64))
427                         intel_de_write(dev_priv, HDCP_REP_CTL,
428                                        rep_ctl | HDCP_SHA1_TEXT_32);
429
430                 /* Store the leftover bytes from the ksv in sha_text */
431                 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
432                 sha_text = 0;
433                 for (j = 0; j < sha_leftovers; j++)
434                         sha_text |= ksv[sha_empty + j] <<
435                                         ((sizeof(sha_text) - j - 1) * 8);
436
437                 /*
438                  * If we still have room in sha_text for more data, continue.
439                  * Otherwise, write it out immediately.
440                  */
441                 if (sizeof(sha_text) > sha_leftovers)
442                         continue;
443
444                 ret = intel_write_sha_text(dev_priv, sha_text);
445                 if (ret < 0)
446                         return ret;
447                 sha_leftovers = 0;
448                 sha_text = 0;
449                 sha_idx += sizeof(sha_text);
450         }
451
452         /*
453          * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
454          * bytes are leftover from the last ksv, we might be able to fit them
455          * all in sha_text (first 2 cases), or we might need to split them up
456          * into 2 writes (last 2 cases).
457          */
458         if (sha_leftovers == 0) {
459                 /* Write 16 bits of text, 16 bits of M0 */
460                 intel_de_write(dev_priv, HDCP_REP_CTL,
461                                rep_ctl | HDCP_SHA1_TEXT_16);
462                 ret = intel_write_sha_text(dev_priv,
463                                            bstatus[0] << 8 | bstatus[1]);
464                 if (ret < 0)
465                         return ret;
466                 sha_idx += sizeof(sha_text);
467
468                 /* Write 32 bits of M0 */
469                 intel_de_write(dev_priv, HDCP_REP_CTL,
470                                rep_ctl | HDCP_SHA1_TEXT_0);
471                 ret = intel_write_sha_text(dev_priv, 0);
472                 if (ret < 0)
473                         return ret;
474                 sha_idx += sizeof(sha_text);
475
476                 /* Write 16 bits of M0 */
477                 intel_de_write(dev_priv, HDCP_REP_CTL,
478                                rep_ctl | HDCP_SHA1_TEXT_16);
479                 ret = intel_write_sha_text(dev_priv, 0);
480                 if (ret < 0)
481                         return ret;
482                 sha_idx += sizeof(sha_text);
483
484         } else if (sha_leftovers == 1) {
485                 /* Write 24 bits of text, 8 bits of M0 */
486                 intel_de_write(dev_priv, HDCP_REP_CTL,
487                                rep_ctl | HDCP_SHA1_TEXT_24);
488                 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
489                 /* Only 24-bits of data, must be in the LSB */
490                 sha_text = (sha_text & 0xffffff00) >> 8;
491                 ret = intel_write_sha_text(dev_priv, sha_text);
492                 if (ret < 0)
493                         return ret;
494                 sha_idx += sizeof(sha_text);
495
496                 /* Write 32 bits of M0 */
497                 intel_de_write(dev_priv, HDCP_REP_CTL,
498                                rep_ctl | HDCP_SHA1_TEXT_0);
499                 ret = intel_write_sha_text(dev_priv, 0);
500                 if (ret < 0)
501                         return ret;
502                 sha_idx += sizeof(sha_text);
503
504                 /* Write 24 bits of M0 */
505                 intel_de_write(dev_priv, HDCP_REP_CTL,
506                                rep_ctl | HDCP_SHA1_TEXT_8);
507                 ret = intel_write_sha_text(dev_priv, 0);
508                 if (ret < 0)
509                         return ret;
510                 sha_idx += sizeof(sha_text);
511
512         } else if (sha_leftovers == 2) {
513                 /* Write 32 bits of text */
514                 intel_de_write(dev_priv, HDCP_REP_CTL,
515                                rep_ctl | HDCP_SHA1_TEXT_32);
516                 sha_text |= bstatus[0] << 8 | bstatus[1];
517                 ret = intel_write_sha_text(dev_priv, sha_text);
518                 if (ret < 0)
519                         return ret;
520                 sha_idx += sizeof(sha_text);
521
522                 /* Write 64 bits of M0 */
523                 intel_de_write(dev_priv, HDCP_REP_CTL,
524                                rep_ctl | HDCP_SHA1_TEXT_0);
525                 for (i = 0; i < 2; i++) {
526                         ret = intel_write_sha_text(dev_priv, 0);
527                         if (ret < 0)
528                                 return ret;
529                         sha_idx += sizeof(sha_text);
530                 }
531
532                 /*
533                  * Terminate the SHA-1 stream by hand. For the other leftover
534                  * cases this is appended by the hardware.
535                  */
536                 intel_de_write(dev_priv, HDCP_REP_CTL,
537                                rep_ctl | HDCP_SHA1_TEXT_32);
538                 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
539                 ret = intel_write_sha_text(dev_priv, sha_text);
540                 if (ret < 0)
541                         return ret;
542                 sha_idx += sizeof(sha_text);
543         } else if (sha_leftovers == 3) {
544                 /* Write 32 bits of text (filled from LSB) */
545                 intel_de_write(dev_priv, HDCP_REP_CTL,
546                                rep_ctl | HDCP_SHA1_TEXT_32);
547                 sha_text |= bstatus[0];
548                 ret = intel_write_sha_text(dev_priv, sha_text);
549                 if (ret < 0)
550                         return ret;
551                 sha_idx += sizeof(sha_text);
552
553                 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
554                 intel_de_write(dev_priv, HDCP_REP_CTL,
555                                rep_ctl | HDCP_SHA1_TEXT_8);
556                 ret = intel_write_sha_text(dev_priv, bstatus[1]);
557                 if (ret < 0)
558                         return ret;
559                 sha_idx += sizeof(sha_text);
560
561                 /* Write 32 bits of M0 */
562                 intel_de_write(dev_priv, HDCP_REP_CTL,
563                                rep_ctl | HDCP_SHA1_TEXT_0);
564                 ret = intel_write_sha_text(dev_priv, 0);
565                 if (ret < 0)
566                         return ret;
567                 sha_idx += sizeof(sha_text);
568
569                 /* Write 8 bits of M0 */
570                 intel_de_write(dev_priv, HDCP_REP_CTL,
571                                rep_ctl | HDCP_SHA1_TEXT_24);
572                 ret = intel_write_sha_text(dev_priv, 0);
573                 if (ret < 0)
574                         return ret;
575                 sha_idx += sizeof(sha_text);
576         } else {
577                 drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
578                             sha_leftovers);
579                 return -EINVAL;
580         }
581
582         intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
583         /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
584         while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
585                 ret = intel_write_sha_text(dev_priv, 0);
586                 if (ret < 0)
587                         return ret;
588                 sha_idx += sizeof(sha_text);
589         }
590
591         /*
592          * Last write gets the length of the concatenation in bits. That is:
593          *  - 5 bytes per device
594          *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
595          */
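        /* For example, a single downstream device gives (1 * 5 + 10) * 8 = 120 bits. */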
596         sha_text = (num_downstream * 5 + 10) * 8;
597         ret = intel_write_sha_text(dev_priv, sha_text);
598         if (ret < 0)
599                 return ret;
600
601         /* Tell the HW we're done with the hash and wait for it to ACK */
602         intel_de_write(dev_priv, HDCP_REP_CTL,
603                        rep_ctl | HDCP_SHA1_COMPLETE_HASH);
604         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
605                                   HDCP_SHA1_COMPLETE, 1)) {
606                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
607                 return -ETIMEDOUT;
608         }
609         if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
610                 drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
611                 return -ENXIO;
612         }
613
614         return 0;
615 }
616
617 /* Implements Part 2 of the HDCP authorization procedure */
618 static
619 int intel_hdcp_auth_downstream(struct intel_connector *connector)
620 {
621         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
622         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
623         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
624         u8 bstatus[2], num_downstream, *ksv_fifo;
625         int ret, i, tries = 3;
626
627         ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
628         if (ret) {
629                 drm_dbg_kms(&dev_priv->drm,
630                             "KSV list failed to become ready (%d)\n", ret);
631                 return ret;
632         }
633
634         ret = shim->read_bstatus(dig_port, bstatus);
635         if (ret)
636                 return ret;
637
638         if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
639             DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
640                 drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
641                 return -EPERM;
642         }
643
644         /*
645          * When a repeater reports a device count of 0, the HDCP1.4 spec
646          * allows disabling HDCP encryption, which implies the repeater can't
647          * have a display of its own. As there is no consumption of encrypted
648          * content in a repeater with 0 downstream devices, we fail the
649          * authentication.
650          */
651         num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
652         if (num_downstream == 0) {
653                 drm_dbg_kms(&dev_priv->drm,
654                             "Repeater with zero downstream devices\n");
655                 return -EINVAL;
656         }
657
658         ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
659         if (!ksv_fifo) {
660                 drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
661                 return -ENOMEM;
662         }
663
664         ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
665         if (ret)
666                 goto err;
667
668         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
669                                         num_downstream) > 0) {
670                 drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
671                 ret = -EPERM;
672                 goto err;
673         }
674
675         /*
676          * When V' mismatches, the DP spec mandates re-reading
677          * V' at least twice.
678          */
679         for (i = 0; i < tries; i++) {
680                 ret = intel_hdcp_validate_v_prime(connector, shim,
681                                                   ksv_fifo, num_downstream,
682                                                   bstatus);
683                 if (!ret)
684                         break;
685         }
686
687         if (i == tries) {
688                 drm_dbg_kms(&dev_priv->drm,
689                             "V Prime validation failed.(%d)\n", ret);
690                 goto err;
691         }
692
693         drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
694                     num_downstream);
695         ret = 0;
696 err:
697         kfree(ksv_fifo);
698         return ret;
699 }
700
701 /* Implements Part 1 of the HDCP authorization procedure */
702 static int intel_hdcp_auth(struct intel_connector *connector)
703 {
704         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
705         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
706         struct intel_hdcp *hdcp = &connector->hdcp;
707         const struct intel_hdcp_shim *shim = hdcp->shim;
708         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
709         enum port port = dig_port->base.port;
710         unsigned long r0_prime_gen_start;
711         int ret, i, tries = 2;
712         union {
713                 u32 reg[2];
714                 u8 shim[DRM_HDCP_AN_LEN];
715         } an;
716         union {
717                 u32 reg[2];
718                 u8 shim[DRM_HDCP_KSV_LEN];
719         } bksv;
720         union {
721                 u32 reg;
722                 u8 shim[DRM_HDCP_RI_LEN];
723         } ri;
724         bool repeater_present, hdcp_capable;
725
726         /*
727          * Detects whether the display is HDCP capable. Although we check for
728          * valid Bksv below, the HDCP over DP spec requires that we check
729          * whether the display supports HDCP before we write An. For HDMI
730          * displays, this is not necessary.
731          */
732         if (shim->hdcp_capable) {
733                 ret = shim->hdcp_capable(dig_port, &hdcp_capable);
734                 if (ret)
735                         return ret;
736                 if (!hdcp_capable) {
737                         drm_dbg_kms(&dev_priv->drm,
738                                     "Panel is not HDCP capable\n");
739                         return -EINVAL;
740                 }
741         }
742
743         /* Initialize An with 2 random values and acquire it */
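        /*
         * An is 64 bits: HDCP_ANINIT is written twice with 32 random bits
         * each, then HDCP_CONF_CAPTURE_AN has the HW latch An, which is read
         * back from HDCP_ANLO/HDCP_ANHI below.
         */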
744         for (i = 0; i < 2; i++)
745                 intel_de_write(dev_priv,
746                                HDCP_ANINIT(dev_priv, cpu_transcoder, port),
747                                get_random_u32());
748         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
749                        HDCP_CONF_CAPTURE_AN);
750
751         /* Wait for An to be acquired */
752         if (intel_de_wait_for_set(dev_priv,
753                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
754                                   HDCP_STATUS_AN_READY, 1)) {
755                 drm_err(&dev_priv->drm, "Timed out waiting for An\n");
756                 return -ETIMEDOUT;
757         }
758
759         an.reg[0] = intel_de_read(dev_priv,
760                                   HDCP_ANLO(dev_priv, cpu_transcoder, port));
761         an.reg[1] = intel_de_read(dev_priv,
762                                   HDCP_ANHI(dev_priv, cpu_transcoder, port));
763         ret = shim->write_an_aksv(dig_port, an.shim);
764         if (ret)
765                 return ret;
766
767         r0_prime_gen_start = jiffies;
768
769         memset(&bksv, 0, sizeof(bksv));
770
771         ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
772         if (ret < 0)
773                 return ret;
774
775         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
776                 drm_err(&dev_priv->drm, "BKSV is revoked\n");
777                 return -EPERM;
778         }
779
780         intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
781                        bksv.reg[0]);
782         intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
783                        bksv.reg[1]);
784
785         ret = shim->repeater_present(dig_port, &repeater_present);
786         if (ret)
787                 return ret;
788         if (repeater_present)
789                 intel_de_write(dev_priv, HDCP_REP_CTL,
790                                intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
791
792         ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
793         if (ret)
794                 return ret;
795
796         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
797                        HDCP_CONF_AUTH_AND_ENC);
798
799         /* Wait for R0 ready */
800         if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
801                      (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
802                 drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
803                 return -ETIMEDOUT;
804         }
805
806         /*
807          * Wait for R0' to become available. The spec says 100ms from Aksv, but
808          * some monitors can take longer than this. We'll set the timeout at
809          * 300ms just to be sure.
810          *
811          * On DP, there's an R0_READY bit available but no such bit
812          * exists on HDMI. Since the upper-bound is the same, we'll just do
813          * the stupid thing instead of polling on one and not the other.
814          */
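        /*
         * r0_prime_gen_start was sampled right after the An/Aksv write, so
         * this only sleeps for whatever portion of the 300 ms window remains.
         */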
815         wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
816
817         tries = 3;
818
819         /*
820          * The DP HDCP spec mandates two more attempts to read R0' in case
821          * of an R0 mismatch.
822          */
823         for (i = 0; i < tries; i++) {
824                 ri.reg = 0;
825                 ret = shim->read_ri_prime(dig_port, ri.shim);
826                 if (ret)
827                         return ret;
828                 intel_de_write(dev_priv,
829                                HDCP_RPRIME(dev_priv, cpu_transcoder, port),
830                                ri.reg);
831
832                 /* Wait for Ri prime match */
833                 if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
834                               (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
835                         break;
836         }
837
838         if (i == tries) {
839                 drm_dbg_kms(&dev_priv->drm,
840                             "Timed out waiting for Ri prime match (%x)\n",
841                             intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
842                                           cpu_transcoder, port)));
843                 return -ETIMEDOUT;
844         }
845
846         /* Wait for encryption confirmation */
847         if (intel_de_wait_for_set(dev_priv,
848                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
849                                   HDCP_STATUS_ENC,
850                                   HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
851                 drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
852                 return -ETIMEDOUT;
853         }
854
855         /* DP MST Auth Part 1 Step 2.a and Step 2.b */
856         if (shim->stream_encryption) {
857                 ret = shim->stream_encryption(connector, true);
858                 if (ret) {
859                         drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
860                                 connector->base.name, connector->base.base.id);
861                         return ret;
862                 }
863                 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
864                             transcoder_name(hdcp->stream_transcoder));
865         }
866
867         if (repeater_present)
868                 return intel_hdcp_auth_downstream(connector);
869
870         drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
871         return 0;
872 }
873
874 static int _intel_hdcp_disable(struct intel_connector *connector)
875 {
876         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
877         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
878         struct intel_hdcp *hdcp = &connector->hdcp;
879         enum port port = dig_port->base.port;
880         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
881         u32 repeater_ctl;
882         int ret;
883
884         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
885                     connector->base.name, connector->base.base.id);
886
887         if (hdcp->shim->stream_encryption) {
888                 ret = hdcp->shim->stream_encryption(connector, false);
889                 if (ret) {
890                         drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
891                                 connector->base.name, connector->base.base.id);
892                         return ret;
893                 }
894                 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
895                             transcoder_name(hdcp->stream_transcoder));
896                 /*
897                  * If there are other connectors on this port using HDCP,
898                  * don't disable it at the port level until HDCP encryption
899                  * has been disabled for all connectors in the MST topology.
900                  */
901                 if (dig_port->num_hdcp_streams > 0)
902                         return 0;
903         }
904
905         hdcp->hdcp_encrypted = false;
906         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
907         if (intel_de_wait_for_clear(dev_priv,
908                                     HDCP_STATUS(dev_priv, cpu_transcoder, port),
909                                     ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
910                 drm_err(&dev_priv->drm,
911                         "Failed to disable HDCP, timeout clearing status\n");
912                 return -ETIMEDOUT;
913         }
914
915         repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
916                                                    port);
917         intel_de_write(dev_priv, HDCP_REP_CTL,
918                        intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
919
920         ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
921         if (ret) {
922                 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
923                 return ret;
924         }
925
926         drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
927         return 0;
928 }
929
930 static int _intel_hdcp_enable(struct intel_connector *connector)
931 {
932         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
933         struct intel_hdcp *hdcp = &connector->hdcp;
934         int i, ret, tries = 3;
935
936         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
937                     connector->base.name, connector->base.base.id);
938
939         if (!hdcp_key_loadable(dev_priv)) {
940                 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
941                 return -ENXIO;
942         }
943
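        /*
         * Retry the key load a few times; intel_hdcp_clear_keys() resets the
         * key hardware so that the next attempt starts from a clean state.
         */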
944         for (i = 0; i < KEY_LOAD_TRIES; i++) {
945                 ret = intel_hdcp_load_keys(dev_priv);
946                 if (!ret)
947                         break;
948                 intel_hdcp_clear_keys(dev_priv);
949         }
950         if (ret) {
951                 drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
952                         ret);
953                 return ret;
954         }
955
956         /* In case of authentication failure, the HDCP spec expects reauth. */
957         for (i = 0; i < tries; i++) {
958                 ret = intel_hdcp_auth(connector);
959                 if (!ret) {
960                         hdcp->hdcp_encrypted = true;
961                         return 0;
962                 }
963
964                 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
965
966                 /* Ensure HDCP encryption and signalling are stopped. */
967                 _intel_hdcp_disable(connector);
968         }
969
970         drm_dbg_kms(&dev_priv->drm,
971                     "HDCP authentication failed (%d tries/%d)\n", tries, ret);
972         return ret;
973 }
974
975 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
976 {
977         return container_of(hdcp, struct intel_connector, hdcp);
978 }
979
980 static void intel_hdcp_update_value(struct intel_connector *connector,
981                                     u64 value, bool update_property)
982 {
983         struct drm_device *dev = connector->base.dev;
984         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
985         struct intel_hdcp *hdcp = &connector->hdcp;
986
987         drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
988
989         if (hdcp->value == value)
990                 return;
991
992         drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
993
994         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
995                 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
996                         dig_port->num_hdcp_streams--;
997         } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
998                 dig_port->num_hdcp_streams++;
999         }
1000
1001         hdcp->value = value;
1002         if (update_property) {
1003                 drm_connector_get(&connector->base);
1004                 schedule_work(&hdcp->prop_work);
1005         }
1006 }
1007
1008 /* Implements Part 3 of the HDCP authorization procedure */
1009 static int intel_hdcp_check_link(struct intel_connector *connector)
1010 {
1011         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1012         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1013         struct intel_hdcp *hdcp = &connector->hdcp;
1014         enum port port = dig_port->base.port;
1015         enum transcoder cpu_transcoder;
1016         int ret = 0;
1017
1018         mutex_lock(&hdcp->mutex);
1019         mutex_lock(&dig_port->hdcp_mutex);
1020
1021         cpu_transcoder = hdcp->cpu_transcoder;
1022
1023         /* Check_link valid only when HDCP1.4 is enabled */
1024         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1025             !hdcp->hdcp_encrypted) {
1026                 ret = -EINVAL;
1027                 goto out;
1028         }
1029
1030         if (drm_WARN_ON(&dev_priv->drm,
1031                         !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
1032                 drm_err(&dev_priv->drm,
1033                         "%s:%d HDCP link stopped encryption,%x\n",
1034                         connector->base.name, connector->base.base.id,
1035                         intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
1036                 ret = -ENXIO;
1037                 intel_hdcp_update_value(connector,
1038                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1039                                         true);
1040                 goto out;
1041         }
1042
1043         if (hdcp->shim->check_link(dig_port, connector)) {
1044                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1045                         intel_hdcp_update_value(connector,
1046                                 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1047                 }
1048                 goto out;
1049         }
1050
1051         drm_dbg_kms(&dev_priv->drm,
1052                     "[%s:%d] HDCP link failed, retrying authentication\n",
1053                     connector->base.name, connector->base.base.id);
1054
1055         ret = _intel_hdcp_disable(connector);
1056         if (ret) {
1057                 drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
1058                 intel_hdcp_update_value(connector,
1059                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1060                                         true);
1061                 goto out;
1062         }
1063
1064         ret = _intel_hdcp_enable(connector);
1065         if (ret) {
1066                 drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
1067                 intel_hdcp_update_value(connector,
1068                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1069                                         true);
1070                 goto out;
1071         }
1072
1073 out:
1074         mutex_unlock(&dig_port->hdcp_mutex);
1075         mutex_unlock(&hdcp->mutex);
1076         return ret;
1077 }
1078
1079 static void intel_hdcp_prop_work(struct work_struct *work)
1080 {
1081         struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1082                                                prop_work);
1083         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1084         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1085
1086         drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
1087         mutex_lock(&hdcp->mutex);
1088
1089         /*
1090          * This worker is only used to flip between ENABLED/DESIRED. Either of
1091          * those to UNDESIRED is handled by core. If value == UNDESIRED,
1092          * we're running just after hdcp has been disabled, so just exit
1093          */
1094         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1095                 drm_hdcp_update_content_protection(&connector->base,
1096                                                    hdcp->value);
1097
1098         mutex_unlock(&hdcp->mutex);
1099         drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
1100
1101         drm_connector_put(&connector->base);
1102 }
1103
1104 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1105 {
1106         return INTEL_INFO(dev_priv)->display.has_hdcp &&
1107                         (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1108 }
1109
1110 static int
1111 hdcp2_prepare_ake_init(struct intel_connector *connector,
1112                        struct hdcp2_ake_init *ake_data)
1113 {
1114         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1115         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1116         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1117         struct i915_hdcp_comp_master *comp;
1118         int ret;
1119
1120         mutex_lock(&dev_priv->hdcp_comp_mutex);
1121         comp = dev_priv->hdcp_master;
1122
1123         if (!comp || !comp->ops) {
1124                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1125                 return -EINVAL;
1126         }
1127
1128         ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1129         if (ret)
1130                 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1131                             ret);
1132         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1133
1134         return ret;
1135 }
1136
1137 static int
1138 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1139                                 struct hdcp2_ake_send_cert *rx_cert,
1140                                 bool *paired,
1141                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
1142                                 size_t *msg_sz)
1143 {
1144         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1145         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1146         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1147         struct i915_hdcp_comp_master *comp;
1148         int ret;
1149
1150         mutex_lock(&dev_priv->hdcp_comp_mutex);
1151         comp = dev_priv->hdcp_master;
1152
1153         if (!comp || !comp->ops) {
1154                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1155                 return -EINVAL;
1156         }
1157
1158         ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1159                                                          rx_cert, paired,
1160                                                          ek_pub_km, msg_sz);
1161         if (ret < 0)
1162                 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1163                             ret);
1164         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1165
1166         return ret;
1167 }
1168
1169 static int hdcp2_verify_hprime(struct intel_connector *connector,
1170                                struct hdcp2_ake_send_hprime *rx_hprime)
1171 {
1172         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1173         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1174         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1175         struct i915_hdcp_comp_master *comp;
1176         int ret;
1177
1178         mutex_lock(&dev_priv->hdcp_comp_mutex);
1179         comp = dev_priv->hdcp_master;
1180
1181         if (!comp || !comp->ops) {
1182                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1183                 return -EINVAL;
1184         }
1185
1186         ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1187         if (ret < 0)
1188                 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1189         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1190
1191         return ret;
1192 }
1193
1194 static int
1195 hdcp2_store_pairing_info(struct intel_connector *connector,
1196                          struct hdcp2_ake_send_pairing_info *pairing_info)
1197 {
1198         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1199         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1200         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1201         struct i915_hdcp_comp_master *comp;
1202         int ret;
1203
1204         mutex_lock(&dev_priv->hdcp_comp_mutex);
1205         comp = dev_priv->hdcp_master;
1206
1207         if (!comp || !comp->ops) {
1208                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1209                 return -EINVAL;
1210         }
1211
1212         ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1213         if (ret < 0)
1214                 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1215                             ret);
1216         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1217
1218         return ret;
1219 }
1220
1221 static int
1222 hdcp2_prepare_lc_init(struct intel_connector *connector,
1223                       struct hdcp2_lc_init *lc_init)
1224 {
1225         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1226         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1227         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1228         struct i915_hdcp_comp_master *comp;
1229         int ret;
1230
1231         mutex_lock(&dev_priv->hdcp_comp_mutex);
1232         comp = dev_priv->hdcp_master;
1233
1234         if (!comp || !comp->ops) {
1235                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1236                 return -EINVAL;
1237         }
1238
1239         ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1240         if (ret < 0)
1241                 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1242                             ret);
1243         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1244
1245         return ret;
1246 }
1247
1248 static int
1249 hdcp2_verify_lprime(struct intel_connector *connector,
1250                     struct hdcp2_lc_send_lprime *rx_lprime)
1251 {
1252         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1253         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1254         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1255         struct i915_hdcp_comp_master *comp;
1256         int ret;
1257
1258         mutex_lock(&dev_priv->hdcp_comp_mutex);
1259         comp = dev_priv->hdcp_master;
1260
1261         if (!comp || !comp->ops) {
1262                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1263                 return -EINVAL;
1264         }
1265
1266         ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1267         if (ret < 0)
1268                 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1269                             ret);
1270         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1271
1272         return ret;
1273 }
1274
1275 static int hdcp2_prepare_skey(struct intel_connector *connector,
1276                               struct hdcp2_ske_send_eks *ske_data)
1277 {
1278         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1279         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1280         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1281         struct i915_hdcp_comp_master *comp;
1282         int ret;
1283
1284         mutex_lock(&dev_priv->hdcp_comp_mutex);
1285         comp = dev_priv->hdcp_master;
1286
1287         if (!comp || !comp->ops) {
1288                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1289                 return -EINVAL;
1290         }
1291
1292         ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1293         if (ret < 0)
1294                 drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1295                             ret);
1296         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1297
1298         return ret;
1299 }
1300
1301 static int
1302 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1303                                       struct hdcp2_rep_send_receiverid_list
1304                                                                 *rep_topology,
1305                                       struct hdcp2_rep_send_ack *rep_send_ack)
1306 {
1307         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1308         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1309         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1310         struct i915_hdcp_comp_master *comp;
1311         int ret;
1312
1313         mutex_lock(&dev_priv->hdcp_comp_mutex);
1314         comp = dev_priv->hdcp_master;
1315
1316         if (!comp || !comp->ops) {
1317                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1318                 return -EINVAL;
1319         }
1320
1321         ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1322                                                          rep_topology,
1323                                                          rep_send_ack);
1324         if (ret < 0)
1325                 drm_dbg_kms(&dev_priv->drm,
1326                             "Verify rep topology failed. %d\n", ret);
1327         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1328
1329         return ret;
1330 }
1331
1332 static int
1333 hdcp2_verify_mprime(struct intel_connector *connector,
1334                     struct hdcp2_rep_stream_ready *stream_ready)
1335 {
1336         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1337         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1338         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1339         struct i915_hdcp_comp_master *comp;
1340         int ret;
1341
1342         mutex_lock(&dev_priv->hdcp_comp_mutex);
1343         comp = dev_priv->hdcp_master;
1344
1345         if (!comp || !comp->ops) {
1346                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1347                 return -EINVAL;
1348         }
1349
1350         ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1351         if (ret < 0)
1352                 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1353         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1354
1355         return ret;
1356 }
1357
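     /*
      * Tell the ME FW that HDCP authentication of this port completed, so
      * it can mark the port session as authenticated.
      */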
1358 static int hdcp2_authenticate_port(struct intel_connector *connector)
1359 {
1360         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1361         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1362         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1363         struct i915_hdcp_comp_master *comp;
1364         int ret;
1365
1366         mutex_lock(&dev_priv->hdcp_comp_mutex);
1367         comp = dev_priv->hdcp_master;
1368
1369         if (!comp || !comp->ops) {
1370                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1371                 return -EINVAL;
1372         }
1373
1374         ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1375         if (ret < 0)
1376                 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1377                             ret);
1378         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1379
1380         return ret;
1381 }
1382
1383 static int hdcp2_close_mei_session(struct intel_connector *connector)
1384 {
1385         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1386         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1387         struct i915_hdcp_comp_master *comp;
1388         int ret;
1389
1390         mutex_lock(&dev_priv->hdcp_comp_mutex);
1391         comp = dev_priv->hdcp_master;
1392
1393         if (!comp || !comp->ops) {
1394                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1395                 return -EINVAL;
1396         }
1397
1398         ret = comp->ops->close_hdcp_session(comp->mei_dev,
1399                                              &dig_port->hdcp_port_data);
1400         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1401
1402         return ret;
1403 }
1404
1405 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1406 {
1407         return hdcp2_close_mei_session(connector);
1408 }
1409
1410 /* Authentication flow starts from here */
1411 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1412 {
1413         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1414         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1415         struct intel_hdcp *hdcp = &connector->hdcp;
1416         union {
1417                 struct hdcp2_ake_init ake_init;
1418                 struct hdcp2_ake_send_cert send_cert;
1419                 struct hdcp2_ake_no_stored_km no_stored_km;
1420                 struct hdcp2_ake_send_hprime send_hprime;
1421                 struct hdcp2_ake_send_pairing_info pairing_info;
1422         } msgs;
1423         const struct intel_hdcp_shim *shim = hdcp->shim;
1424         size_t size;
1425         int ret;
1426
1427         /* Init for seq_num */
1428         hdcp->seq_num_v = 0;
1429         hdcp->seq_num_m = 0;
1430
1431         ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1432         if (ret < 0)
1433                 return ret;
1434
1435         ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
1436                                   sizeof(msgs.ake_init));
1437         if (ret < 0)
1438                 return ret;
1439
1440         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
1441                                  &msgs.send_cert, sizeof(msgs.send_cert));
1442         if (ret < 0)
1443                 return ret;
1444
1445         if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1446                 drm_dbg_kms(&dev_priv->drm, "cert.rx_caps don't claim HDCP2.2\n");
1447                 return -EINVAL;
1448         }
1449
1450         hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1451
1452         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1453                                         msgs.send_cert.cert_rx.receiver_id,
1454                                         1) > 0) {
1455                 drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
1456                 return -EPERM;
1457         }
1458
1459         /*
1460          * msgs.no_stored_km also holds the message used when km is already
1461          * stored for this receiver; size tells which message was prepared.
1462          */
1463         ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1464                                               &hdcp->is_paired,
1465                                               &msgs.no_stored_km, &size);
1466         if (ret < 0)
1467                 return ret;
1468
1469         ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
1470         if (ret < 0)
1471                 return ret;
1472
1473         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1474                                  &msgs.send_hprime, sizeof(msgs.send_hprime));
1475         if (ret < 0)
1476                 return ret;
1477
1478         ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1479         if (ret < 0)
1480                 return ret;
1481
1482         if (!hdcp->is_paired) {
1483                 /* Pairing is required */
1484                 ret = shim->read_2_2_msg(dig_port,
1485                                          HDCP_2_2_AKE_SEND_PAIRING_INFO,
1486                                          &msgs.pairing_info,
1487                                          sizeof(msgs.pairing_info));
1488                 if (ret < 0)
1489                         return ret;
1490
1491                 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1492                 if (ret < 0)
1493                         return ret;
1494                 hdcp->is_paired = true;
1495         }
1496
1497         return 0;
1498 }
1499
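     /*
      * Locality Check: send LC_Init, read LC_Send_L_prime and have the
      * ME FW verify L', retrying up to HDCP2_LC_RETRY_CNT times.
      */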
1500 static int hdcp2_locality_check(struct intel_connector *connector)
1501 {
1502         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1503         struct intel_hdcp *hdcp = &connector->hdcp;
1504         union {
1505                 struct hdcp2_lc_init lc_init;
1506                 struct hdcp2_lc_send_lprime send_lprime;
1507         } msgs;
1508         const struct intel_hdcp_shim *shim = hdcp->shim;
1509         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1510
1511         for (i = 0; i < tries; i++) {
1512                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1513                 if (ret < 0)
1514                         continue;
1515
1516                 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1517                                       sizeof(msgs.lc_init));
1518                 if (ret < 0)
1519                         continue;
1520
1521                 ret = shim->read_2_2_msg(dig_port,
1522                                          HDCP_2_2_LC_SEND_LPRIME,
1523                                          &msgs.send_lprime,
1524                                          sizeof(msgs.send_lprime));
1525                 if (ret < 0)
1526                         continue;
1527
1528                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1529                 if (!ret)
1530                         break;
1531         }
1532
1533         return ret;
1534 }
1535
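     /*
      * Session Key Exchange: get SKE_Send_Eks from the ME FW and send it
      * to the sink.
      */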
1536 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1537 {
1538         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1539         struct intel_hdcp *hdcp = &connector->hdcp;
1540         struct hdcp2_ske_send_eks send_eks;
1541         int ret;
1542
1543         ret = hdcp2_prepare_skey(connector, &send_eks);
1544         if (ret < 0)
1545                 return ret;
1546
1547         ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1548                                         sizeof(send_eks));
1549         if (ret < 0)
1550                 return ret;
1551
1552         return 0;
1553 }
1554
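     /*
      * Send RepeaterAuth_Stream_Manage for the streams on this port and
      * verify M' from the RepeaterAuth_Stream_Ready reply. seq_num_m is
      * incremented on every attempt, successful or not.
      */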
1555 static
1556 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1557 {
1558         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1559         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1560         struct intel_hdcp *hdcp = &connector->hdcp;
1561         union {
1562                 struct hdcp2_rep_stream_manage stream_manage;
1563                 struct hdcp2_rep_stream_ready stream_ready;
1564         } msgs;
1565         const struct intel_hdcp_shim *shim = hdcp->shim;
1566         int ret, streams_size_delta, i;
1567
1568         if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1569                 return -ERANGE;
1570
1571         /* Prepare RepeaterAuth_Stream_Manage msg */
1572         msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1573         drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1574
1575         msgs.stream_manage.k = cpu_to_be16(data->k);
1576
1577         for (i = 0; i < data->k; i++) {
1578                 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1579                 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1580         }
1581
1582         streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1583                                 sizeof(struct hdcp2_streamid_type);
1584         /* Send it to Repeater */
1585         ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1586                                   sizeof(msgs.stream_manage) - streams_size_delta);
1587         if (ret < 0)
1588                 goto out;
1589
1590         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1591                                  &msgs.stream_ready, sizeof(msgs.stream_ready));
1592         if (ret < 0)
1593                 goto out;
1594
1595         data->seq_num_m = hdcp->seq_num_m;
1596
1597         ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1598
1599 out:
1600         hdcp->seq_num_m++;
1601
1602         return ret;
1603 }
1604
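     /*
      * Read the repeater's ReceiverID_List, validate topology limits,
      * seq_num_v and revocation state, then send back the ack prepared by
      * the ME FW.
      */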
1605 static
1606 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1607 {
1608         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1609         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1610         struct intel_hdcp *hdcp = &connector->hdcp;
1611         union {
1612                 struct hdcp2_rep_send_receiverid_list recvid_list;
1613                 struct hdcp2_rep_send_ack rep_ack;
1614         } msgs;
1615         const struct intel_hdcp_shim *shim = hdcp->shim;
1616         u32 seq_num_v, device_cnt;
1617         u8 *rx_info;
1618         int ret;
1619
1620         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1621                                  &msgs.recvid_list, sizeof(msgs.recvid_list));
1622         if (ret < 0)
1623                 return ret;
1624
1625         rx_info = msgs.recvid_list.rx_info;
1626
1627         if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1628             HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1629                 drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
1630                 return -EINVAL;
1631         }
1632
1633         /* Convert the big-endian seq_num_v into a CPU-order u32 */
1634         seq_num_v =
1635                 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1636
1637         if (!hdcp->hdcp2_encrypted && seq_num_v) {
1638                 drm_dbg_kms(&dev_priv->drm,
1639                             "Non zero Seq_num_v at first RecvId_List msg\n");
1640                 return -EINVAL;
1641         }
1642
1643         if (seq_num_v < hdcp->seq_num_v) {
1644                 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1645                 drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
1646                 return -EINVAL;
1647         }
1648
1649         device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1650                       HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1651         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1652                                         msgs.recvid_list.receiver_ids,
1653                                         device_cnt) > 0) {
1654                 drm_err(&dev_priv->drm, "Revoked receiver ID(s) found in the list\n");
1655                 return -EPERM;
1656         }
1657
1658         ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1659                                                     &msgs.recvid_list,
1660                                                     &msgs.rep_ack);
1661         if (ret < 0)
1662                 return ret;
1663
1664         hdcp->seq_num_v = seq_num_v;
1665         ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
1666                                   sizeof(msgs.rep_ack));
1667         if (ret < 0)
1668                 return ret;
1669
1670         return 0;
1671 }
1672
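     /*
      * Full HDCP2.2 authentication of the sink: AKE, Locality Check, SKE,
      * stream type configuration and, for repeaters, topology authentication.
      */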
1673 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1674 {
1675         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1676         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1677         struct intel_hdcp *hdcp = &connector->hdcp;
1678         const struct intel_hdcp_shim *shim = hdcp->shim;
1679         int ret;
1680
1681         ret = hdcp2_authentication_key_exchange(connector);
1682         if (ret < 0) {
1683                 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1684                 return ret;
1685         }
1686
1687         ret = hdcp2_locality_check(connector);
1688         if (ret < 0) {
1689                 drm_dbg_kms(&i915->drm,
1690                             "Locality Check failed. Err : %d\n", ret);
1691                 return ret;
1692         }
1693
1694         ret = hdcp2_session_key_exchange(connector);
1695         if (ret < 0) {
1696                 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1697                 return ret;
1698         }
1699
1700         if (shim->config_stream_type) {
1701                 ret = shim->config_stream_type(dig_port,
1702                                                hdcp->is_repeater,
1703                                                hdcp->content_type);
1704                 if (ret < 0)
1705                         return ret;
1706         }
1707
1708         if (hdcp->is_repeater) {
1709                 ret = hdcp2_authenticate_repeater_topology(connector);
1710                 if (ret < 0) {
1711                         drm_dbg_kms(&i915->drm,
1712                                     "Repeater Auth Failed. Err: %d\n", ret);
1713                         return ret;
1714                 }
1715         }
1716
1717         return ret;
1718 }
1719
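     /*
      * Enable stream encryption once the link reports it is encrypted. If
      * it is not, deauthenticate the port so the next attempt starts clean.
      */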
1720 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1721 {
1722         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1723         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1724         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1725         struct intel_hdcp *hdcp = &connector->hdcp;
1726         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1727         enum port port = dig_port->base.port;
1728         int ret = 0;
1729
1730         if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1731                             LINK_ENCRYPTION_STATUS)) {
1732                 drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1733                         connector->base.name, connector->base.base.id);
1734                 ret = -EPERM;
1735                 goto link_recover;
1736         }
1737
1738         if (hdcp->shim->stream_2_2_encryption) {
1739                 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1740                 if (ret) {
1741                         drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1742                                 connector->base.name, connector->base.base.id);
1743                         return ret;
1744                 }
1745                 drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1746                             transcoder_name(hdcp->stream_transcoder));
1747         }
1748
1749         return 0;
1750
1751 link_recover:
1752         if (hdcp2_deauthenticate_port(connector) < 0)
1753                 drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
1754
1755         dig_port->hdcp_auth_status = false;
1756         data->k = 0;
1757
1758         return ret;
1759 }
1760
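     /*
      * Enable HDCP signalling if needed, request link encryption on the
      * authenticated link and wait for LINK_ENCRYPTION_STATUS to be set.
      */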
1761 static int hdcp2_enable_encryption(struct intel_connector *connector)
1762 {
1763         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1764         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1765         struct intel_hdcp *hdcp = &connector->hdcp;
1766         enum port port = dig_port->base.port;
1767         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1768         int ret;
1769
1770         drm_WARN_ON(&dev_priv->drm,
1771                     intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1772                     LINK_ENCRYPTION_STATUS);
1773         if (hdcp->shim->toggle_signalling) {
1774                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1775                                                     true);
1776                 if (ret) {
1777                         drm_err(&dev_priv->drm,
1778                                 "Failed to enable HDCP signalling. %d\n",
1779                                 ret);
1780                         return ret;
1781                 }
1782         }
1783
1784         if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1785             LINK_AUTH_STATUS) {
1786                 /* Link is Authenticated. Now set for Encryption */
1787                 intel_de_write(dev_priv,
1788                                HDCP2_CTL(dev_priv, cpu_transcoder, port),
1789                                intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
1790         }
1791
1792         ret = intel_de_wait_for_set(dev_priv,
1793                                     HDCP2_STATUS(dev_priv, cpu_transcoder,
1794                                                  port),
1795                                     LINK_ENCRYPTION_STATUS,
1796                                     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1797         dig_port->hdcp_auth_status = true;
1798
1799         return ret;
1800 }
1801
1802 static int hdcp2_disable_encryption(struct intel_connector *connector)
1803 {
1804         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1805         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1806         struct intel_hdcp *hdcp = &connector->hdcp;
1807         enum port port = dig_port->base.port;
1808         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1809         int ret;
1810
1811         drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1812                                       LINK_ENCRYPTION_STATUS));
1813
1814         intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
1815                        intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
1816
1817         ret = intel_de_wait_for_clear(dev_priv,
1818                                       HDCP2_STATUS(dev_priv, cpu_transcoder,
1819                                                    port),
1820                                       LINK_ENCRYPTION_STATUS,
1821                                       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1822         if (ret == -ETIMEDOUT)
1823                 drm_dbg_kms(&dev_priv->drm, "Disable encryption timed out\n");
1824
1825         if (hdcp->shim->toggle_signalling) {
1826                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1827                                                     false);
1828                 if (ret) {
1829                         drm_err(&dev_priv->drm,
1830                                 "Failed to disable HDCP signalling. %d\n",
1831                                 ret);
1832                         return ret;
1833                 }
1834         }
1835
1836         return ret;
1837 }
1838
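     /*
      * Repeater-only wrapper: retry stream management a few times, but bail
      * out early once seq_num_m rolls over.
      */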
1839 static int
1840 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1841 {
1842         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1843         int i, tries = 3, ret;
1844
1845         if (!connector->hdcp.is_repeater)
1846                 return 0;
1847
1848         for (i = 0; i < tries; i++) {
1849                 ret = _hdcp2_propagate_stream_management_info(connector);
1850                 if (!ret)
1851                         break;
1852
1853                 /* Let's restart the auth in case of seq_num_m roll over */
1854                 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1855                         drm_dbg_kms(&i915->drm,
1856                                     "seq_num_m roll over.(%d)\n", ret);
1857                         break;
1858                 }
1859
1860                 drm_dbg_kms(&i915->drm,
1861                             "HDCP2 stream management %d of %d Failed.(%d)\n",
1862                             i + 1, tries, ret);
1863         }
1864
1865         return ret;
1866 }
1867
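     /*
      * Authenticate the sink, retrying a few times, then enable link
      * encryption (observing the required delay after SKE) and finally
      * stream encryption.
      */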
1868 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1869 {
1870         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1871         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1872         int ret = 0, i, tries = 3;
1873
1874         for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1875                 ret = hdcp2_authenticate_sink(connector);
1876                 if (!ret) {
1877                         ret = hdcp2_propagate_stream_management_info(connector);
1878                         if (ret) {
1879                                 drm_dbg_kms(&i915->drm,
1880                                             "Stream management failed.(%d)\n",
1881                                             ret);
1882                                 break;
1883                         }
1884
1885                         ret = hdcp2_authenticate_port(connector);
1886                         if (!ret)
1887                                 break;
1888                         drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1889                                     ret);
1890                 }
1891
1892                 /* Clearing the mei hdcp session */
1893                 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1894                             i + 1, tries, ret);
1895                 if (hdcp2_deauthenticate_port(connector) < 0)
1896                         drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1897         }
1898
1899         if (!ret && !dig_port->hdcp_auth_status) {
1900                 /*
1901                  * Ensure the required 200 ms minimum interval between
1902                  * Session Key Exchange and enabling encryption.
1903                  */
1904                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1905                 ret = hdcp2_enable_encryption(connector);
1906                 if (ret < 0) {
1907                         drm_dbg_kms(&i915->drm,
1908                                     "Encryption Enable Failed.(%d)\n", ret);
1909                         if (hdcp2_deauthenticate_port(connector) < 0)
1910                                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1911                 }
1912         }
1913
1914         if (!ret)
1915                 ret = hdcp2_enable_stream_encryption(connector);
1916
1917         return ret;
1918 }
1919
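     /*
      * Top level HDCP2.2 enable: build the stream list (one stream for SST,
      * every stream on the port for MST) and run authenticate-and-encrypt.
      */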
1920 static int _intel_hdcp2_enable(struct intel_connector *connector)
1921 {
1922         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1923         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1924         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1925         struct intel_hdcp *hdcp = &connector->hdcp;
1926         int ret;
1927
1928         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1929                     connector->base.name, connector->base.base.id,
1930                     hdcp->content_type);
1931
1932         /* Stream which requires encryption */
1933         if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
1934                 data->k = 1;
1935                 data->streams[0].stream_type = hdcp->content_type;
1936         } else {
1937                 ret = intel_hdcp_required_content_stream(dig_port);
1938                 if (ret)
1939                         return ret;
1940         }
1941
1942         ret = hdcp2_authenticate_and_encrypt(connector);
1943         if (ret) {
1944                 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
1945                             hdcp->content_type, ret);
1946                 return ret;
1947         }
1948
1949         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1950                     connector->base.name, connector->base.base.id,
1951                     hdcp->content_type);
1952
1953         hdcp->hdcp2_encrypted = true;
1954         return 0;
1955 }
1956
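     /*
      * Disable HDCP2.2: stop stream encryption and, unless other MST streams
      * on the port still need it, tear down link encryption and the FW
      * session as well.
      */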
1957 static int
1958 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1959 {
1960         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1961         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1962         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1963         struct intel_hdcp *hdcp = &connector->hdcp;
1964         int ret;
1965
1966         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1967                     connector->base.name, connector->base.base.id);
1968
1969         if (hdcp->shim->stream_2_2_encryption) {
1970                 ret = hdcp->shim->stream_2_2_encryption(connector, false);
1971                 if (ret) {
1972                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
1973                                 connector->base.name, connector->base.base.id);
1974                         return ret;
1975                 }
1976                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
1977                             transcoder_name(hdcp->stream_transcoder));
1978
1979                 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
1980                         return 0;
1981         }
1982
1983         ret = hdcp2_disable_encryption(connector);
1984
1985         if (hdcp2_deauthenticate_port(connector) < 0)
1986                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1987
1988         connector->hdcp.hdcp2_encrypted = false;
1989         dig_port->hdcp_auth_status = false;
1990         data->k = 0;
1991
1992         return ret;
1993 }
1994
1995 /* Implements the Link Integrity Check for HDCP2.2 */
1996 static int intel_hdcp2_check_link(struct intel_connector *connector)
1997 {
1998         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1999         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2000         struct intel_hdcp *hdcp = &connector->hdcp;
2001         enum port port = dig_port->base.port;
2002         enum transcoder cpu_transcoder;
2003         int ret = 0;
2004
2005         mutex_lock(&hdcp->mutex);
2006         mutex_lock(&dig_port->hdcp_mutex);
2007         cpu_transcoder = hdcp->cpu_transcoder;
2008
2009         /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2010         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2011             !hdcp->hdcp2_encrypted) {
2012                 ret = -EINVAL;
2013                 goto out;
2014         }
2015
2016         if (drm_WARN_ON(&dev_priv->drm,
2017                         !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
2018                 drm_err(&dev_priv->drm,
2019                         "HDCP2.2 link stopped the encryption, %x\n",
2020                         intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
2021                 ret = -ENXIO;
2022                 _intel_hdcp2_disable(connector, true);
2023                 intel_hdcp_update_value(connector,
2024                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2025                                         true);
2026                 goto out;
2027         }
2028
2029         ret = hdcp->shim->check_2_2_link(dig_port, connector);
2030         if (ret == HDCP_LINK_PROTECTED) {
2031                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2032                         intel_hdcp_update_value(connector,
2033                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2034                                         true);
2035                 }
2036                 goto out;
2037         }
2038
2039         if (ret == HDCP_TOPOLOGY_CHANGE) {
2040                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2041                         goto out;
2042
2043                 drm_dbg_kms(&dev_priv->drm,
2044                             "HDCP2.2 Downstream topology change\n");
2045                 ret = hdcp2_authenticate_repeater_topology(connector);
2046                 if (!ret) {
2047                         intel_hdcp_update_value(connector,
2048                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2049                                         true);
2050                         goto out;
2051                 }
2052                 drm_dbg_kms(&dev_priv->drm,
2053                             "[%s:%d] Repeater topology auth failed.(%d)\n",
2054                             connector->base.name, connector->base.base.id,
2055                             ret);
2056         } else {
2057                 drm_dbg_kms(&dev_priv->drm,
2058                             "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2059                             connector->base.name, connector->base.base.id);
2060         }
2061
2062         ret = _intel_hdcp2_disable(connector, true);
2063         if (ret) {
2064                 drm_err(&dev_priv->drm,
2065                         "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2066                         connector->base.name, connector->base.base.id, ret);
2067                 intel_hdcp_update_value(connector,
2068                                 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2069                 goto out;
2070         }
2071
2072         ret = _intel_hdcp2_enable(connector);
2073         if (ret) {
2074                 drm_dbg_kms(&dev_priv->drm,
2075                             "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2076                             connector->base.name, connector->base.base.id,
2077                             ret);
2078                 intel_hdcp_update_value(connector,
2079                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2080                                         true);
2081                 goto out;
2082         }
2083
2084 out:
2085         mutex_unlock(&dig_port->hdcp_mutex);
2086         mutex_unlock(&hdcp->mutex);
2087         return ret;
2088 }
2089
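     /*
      * Periodic link check: try the HDCP2.2 check first and fall back to the
      * HDCP1.4 check, rescheduling itself while the link stays protected.
      */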
2090 static void intel_hdcp_check_work(struct work_struct *work)
2091 {
2092         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2093                                                struct intel_hdcp,
2094                                                check_work);
2095         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2096
2097         if (drm_connector_is_unregistered(&connector->base))
2098                 return;
2099
2100         if (!intel_hdcp2_check_link(connector))
2101                 schedule_delayed_work(&hdcp->check_work,
2102                                       DRM_HDCP2_CHECK_PERIOD_MS);
2103         else if (!intel_hdcp_check_link(connector))
2104                 schedule_delayed_work(&hdcp->check_work,
2105                                       DRM_HDCP_CHECK_PERIOD_MS);
2106 }
2107
2108 static int i915_hdcp_component_bind(struct device *i915_kdev,
2109                                     struct device *mei_kdev, void *data)
2110 {
2111         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2112
2113         drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2114         mutex_lock(&dev_priv->hdcp_comp_mutex);
2115         dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
2116         dev_priv->hdcp_master->mei_dev = mei_kdev;
2117         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2118
2119         return 0;
2120 }
2121
2122 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2123                                        struct device *mei_kdev, void *data)
2124 {
2125         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2126
2127         drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2128         mutex_lock(&dev_priv->hdcp_comp_mutex);
2129         dev_priv->hdcp_master = NULL;
2130         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2131 }
2132
2133 static const struct component_ops i915_hdcp_component_ops = {
2134         .bind   = i915_hdcp_component_bind,
2135         .unbind = i915_hdcp_component_unbind,
2136 };
2137
2138 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2139 {
2140         switch (port) {
2141         case PORT_A:
2142                 return MEI_DDI_A;
2143         case PORT_B ... PORT_F:
2144                 return (enum mei_fw_ddi)port;
2145         default:
2146                 return MEI_DDI_INVALID_PORT;
2147         }
2148 }
2149
2150 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2151 {
2152         switch (cpu_transcoder) {
2153         case TRANSCODER_A ... TRANSCODER_D:
2154                 return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2155         default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2156                 return MEI_INVALID_TRANSCODER;
2157         }
2158 }
2159
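     /*
      * Fill the hdcp_port_data shared with the ME FW: port/transcoder
      * indices, port type, protocol and the per-stream array.
      */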
2160 static int initialize_hdcp_port_data(struct intel_connector *connector,
2161                                      struct intel_digital_port *dig_port,
2162                                      const struct intel_hdcp_shim *shim)
2163 {
2164         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2165         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2166         struct intel_hdcp *hdcp = &connector->hdcp;
2167         enum port port = dig_port->base.port;
2168
2169         if (DISPLAY_VER(dev_priv) < 12)
2170                 data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2171         else
2172                 /*
2173                  * As per the ME FW API expectation, for GEN 12+, fw_ddi is
2174                  * filled with zero (invalid port index).
2175                  */
2176                 data->fw_ddi = MEI_DDI_INVALID_PORT;
2177
2178         /*
2179          * The associated transcoder is set and modified at modeset time, so
2180          * fw_tc is initialized here to zero (invalid transcoder index). On
2181          * platforms before Gen12 it keeps this value forever.
2182          */
2183         data->fw_tc = MEI_INVALID_TRANSCODER;
2184
2185         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2186         data->protocol = (u8)shim->protocol;
2187
2188         if (!data->streams)
2189                 data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2190                                         sizeof(struct hdcp2_streamid_type),
2191                                         GFP_KERNEL);
2192         if (!data->streams) {
2193                 drm_err(&dev_priv->drm, "Out of Memory\n");
2194                 return -ENOMEM;
2195         }
2196         /* For SST */
2197         data->streams[0].stream_id = 0;
2198         data->streams[0].stream_type = hdcp->content_type;
2199
2200         return 0;
2201 }
2202
2203 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2204 {
2205         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2206                 return false;
2207
2208         return (DISPLAY_VER(dev_priv) >= 10 ||
2209                 IS_KABYLAKE(dev_priv) ||
2210                 IS_COFFEELAKE(dev_priv) ||
2211                 IS_COMETLAKE(dev_priv));
2212 }
2213
2214 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2215 {
2216         int ret;
2217
2218         if (!is_hdcp2_supported(dev_priv))
2219                 return;
2220
2221         mutex_lock(&dev_priv->hdcp_comp_mutex);
2222         drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2223
2224         dev_priv->hdcp_comp_added = true;
2225         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2226         ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2227                                   I915_COMPONENT_HDCP);
2228         if (ret < 0) {
2229                 drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2230                             ret);
2231                 mutex_lock(&dev_priv->hdcp_comp_mutex);
2232                 dev_priv->hdcp_comp_added = false;
2233                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2234                 return;
2235         }
2236 }
2237
2238 static void intel_hdcp2_init(struct intel_connector *connector,
2239                              struct intel_digital_port *dig_port,
2240                              const struct intel_hdcp_shim *shim)
2241 {
2242         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2243         struct intel_hdcp *hdcp = &connector->hdcp;
2244         int ret;
2245
2246         ret = initialize_hdcp_port_data(connector, dig_port, shim);
2247         if (ret) {
2248                 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2249                 return;
2250         }
2251
2252         hdcp->hdcp2_supported = true;
2253 }
2254
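     /*
      * Per-connector HDCP init: set up HDCP2.2 port data when supported,
      * attach the content protection property and initialize the locks and
      * workers.
      */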
2255 int intel_hdcp_init(struct intel_connector *connector,
2256                     struct intel_digital_port *dig_port,
2257                     const struct intel_hdcp_shim *shim)
2258 {
2259         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2260         struct intel_hdcp *hdcp = &connector->hdcp;
2261         int ret;
2262
2263         if (!shim)
2264                 return -EINVAL;
2265
2266         if (is_hdcp2_supported(dev_priv))
2267                 intel_hdcp2_init(connector, dig_port, shim);
2268
2269         ret =
2270         drm_connector_attach_content_protection_property(&connector->base,
2271                                                          hdcp->hdcp2_supported);
2272         if (ret) {
2273                 hdcp->hdcp2_supported = false;
2274                 kfree(dig_port->hdcp_port_data.streams);
2275                 return ret;
2276         }
2277
2278         hdcp->shim = shim;
2279         mutex_init(&hdcp->mutex);
2280         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2281         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2282         init_waitqueue_head(&hdcp->cp_irq_queue);
2283
2284         return 0;
2285 }
2286
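     /*
      * Enable content protection on the connector: prefer HDCP2.2 when the
      * sink is capable, fall back to HDCP1.4 for Type 0 content, and kick
      * off the periodic link check on success.
      */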
2287 int intel_hdcp_enable(struct intel_connector *connector,
2288                       const struct intel_crtc_state *pipe_config, u8 content_type)
2289 {
2290         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2291         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2292         struct intel_hdcp *hdcp = &connector->hdcp;
2293         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2294         int ret = -EINVAL;
2295
2296         if (!hdcp->shim)
2297                 return -ENOENT;
2298
2299         if (!connector->encoder) {
2300                 drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
2301                         connector->base.name, connector->base.base.id);
2302                 return -ENODEV;
2303         }
2304
2305         mutex_lock(&hdcp->mutex);
2306         mutex_lock(&dig_port->hdcp_mutex);
2307         drm_WARN_ON(&dev_priv->drm,
2308                     hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2309         hdcp->content_type = content_type;
2310
2311         if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2312                 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2313                 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2314         } else {
2315                 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2316                 hdcp->stream_transcoder = INVALID_TRANSCODER;
2317         }
2318
2319         if (DISPLAY_VER(dev_priv) >= 12)
2320                 dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
2321
2322         /*
2323          * HDCP2.2 is considered more secure than HDCP1.4, so if the setup
2324          * is HDCP2.2 capable, HDCP2.2 is preferred.
2325          */
2326         if (intel_hdcp2_capable(connector)) {
2327                 ret = _intel_hdcp2_enable(connector);
2328                 if (!ret)
2329                         check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
2330         }
2331
2332         /*
2333          * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2334          * be attempted.
2335          */
2336         if (ret && intel_hdcp_capable(connector) &&
2337             hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2338                 ret = _intel_hdcp_enable(connector);
2339         }
2340
2341         if (!ret) {
2342                 schedule_delayed_work(&hdcp->check_work, check_link_interval);
2343                 intel_hdcp_update_value(connector,
2344                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2345                                         true);
2346         }
2347
2348         mutex_unlock(&dig_port->hdcp_mutex);
2349         mutex_unlock(&hdcp->mutex);
2350         return ret;
2351 }
2352
2353 int intel_hdcp_disable(struct intel_connector *connector)
2354 {
2355         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2356         struct intel_hdcp *hdcp = &connector->hdcp;
2357         int ret = 0;
2358
2359         if (!hdcp->shim)
2360                 return -ENOENT;
2361
2362         mutex_lock(&hdcp->mutex);
2363         mutex_lock(&dig_port->hdcp_mutex);
2364
2365         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2366                 goto out;
2367
2368         intel_hdcp_update_value(connector,
2369                                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2370         if (hdcp->hdcp2_encrypted)
2371                 ret = _intel_hdcp2_disable(connector, false);
2372         else if (hdcp->hdcp_encrypted)
2373                 ret = _intel_hdcp_disable(connector);
2374
2375 out:
2376         mutex_unlock(&dig_port->hdcp_mutex);
2377         mutex_unlock(&hdcp->mutex);
2378         cancel_delayed_work_sync(&hdcp->check_work);
2379         return ret;
2380 }
2381
2382 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2383                             struct intel_encoder *encoder,
2384                             const struct intel_crtc_state *crtc_state,
2385                             const struct drm_connector_state *conn_state)
2386 {
2387         struct intel_connector *connector =
2388                                 to_intel_connector(conn_state->connector);
2389         struct intel_hdcp *hdcp = &connector->hdcp;
2390         bool content_protection_type_changed, desired_and_not_enabled = false;
2391
2392         if (!connector->hdcp.shim)
2393                 return;
2394
2395         content_protection_type_changed =
2396                 (conn_state->hdcp_content_type != hdcp->content_type &&
2397                  conn_state->content_protection !=
2398                  DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2399
2400         /*
2401          * If a content type change is requested during an active HDCP
2402          * session, disable HDCP and re-enable it with the new type value.
2403          */
2404         if (conn_state->content_protection ==
2405             DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2406             content_protection_type_changed)
2407                 intel_hdcp_disable(connector);
2408
2409         /*
2410          * Mark the hdcp state as DESIRED after the hdcp disable done as
2411          * part of the type change procedure.
2412          */
2413         if (content_protection_type_changed) {
2414                 mutex_lock(&hdcp->mutex);
2415                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2416                 drm_connector_get(&connector->base);
2417                 schedule_work(&hdcp->prop_work);
2418                 mutex_unlock(&hdcp->mutex);
2419         }
2420
2421         if (conn_state->content_protection ==
2422             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2423                 mutex_lock(&hdcp->mutex);
2424                 /* Avoid enabling hdcp if it is already ENABLED */
2425                 desired_and_not_enabled =
2426                         hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2427                 mutex_unlock(&hdcp->mutex);
2428                 /*
2429                  * If HDCP is already ENABLED while the CP property is DESIRED,
2430                  * schedule prop_work to report the correct property to user space.
2431                  */
2432                 if (!desired_and_not_enabled && !content_protection_type_changed) {
2433                         drm_connector_get(&connector->base);
2434                         schedule_work(&hdcp->prop_work);
2435                 }
2436         }
2437
2438         if (desired_and_not_enabled || content_protection_type_changed)
2439                 intel_hdcp_enable(connector,
2440                                   crtc_state,
2441                                   (u8)conn_state->hdcp_content_type);
2442 }
2443
2444 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2445 {
2446         mutex_lock(&dev_priv->hdcp_comp_mutex);
2447         if (!dev_priv->hdcp_comp_added) {
2448                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2449                 return;
2450         }
2451
2452         dev_priv->hdcp_comp_added = false;
2453         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2454
2455         component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2456 }
2457
2458 void intel_hdcp_cleanup(struct intel_connector *connector)
2459 {
2460         struct intel_hdcp *hdcp = &connector->hdcp;
2461
2462         if (!hdcp->shim)
2463                 return;
2464
2465         /*
2466          * If the connector is registered, it's possible userspace could kick
2467          * off another HDCP enable, which would re-spawn the workers.
2468          */
2469         drm_WARN_ON(connector->base.dev,
2470                 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2471
2472         /*
2473          * Now that the connector is not registered, check_work won't be run,
2474          * but cancel any outstanding instances of it
2475          */
2476         cancel_delayed_work_sync(&hdcp->check_work);
2477
2478         /*
2479          * We don't cancel prop_work in the same way as check_work since it
2480          * requires connection_mutex which could be held while calling this
2481          * function. Instead, we rely on the connector references grabbed before
2482          * scheduling prop_work to ensure the connector is alive when prop_work
2483          * is run. So if we're in the destroy path (which is where this
2484          * function should be called), we're "guaranteed" that prop_work is not
2485          * active (tl;dr This Should Never Happen).
2486          */
2487         drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2488
2489         mutex_lock(&hdcp->mutex);
2490         hdcp->shim = NULL;
2491         mutex_unlock(&hdcp->mutex);
2492 }
2493
2494 void intel_hdcp_atomic_check(struct drm_connector *connector,
2495                              struct drm_connector_state *old_state,
2496                              struct drm_connector_state *new_state)
2497 {
2498         u64 old_cp = old_state->content_protection;
2499         u64 new_cp = new_state->content_protection;
2500         struct drm_crtc_state *crtc_state;
2501
2502         if (!new_state->crtc) {
2503                 /*
2504                  * If the connector is being disabled with CP enabled, mark it
2505                  * desired so it's re-enabled when the connector is brought back
2506                  */
2507                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2508                         new_state->content_protection =
2509                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2510                 return;
2511         }
2512
2513         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2514                                                    new_state->crtc);
2515         /*
2516          * Fix the HDCP uapi content protection state in case of modeset.
2517          * FIXME: As per HDCP content protection property uapi doc, an uevent()
2518          * needs to be sent if there is a transition from ENABLED->DESIRED.
2519          */
2520         if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2521             (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2522             new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2523                 new_state->content_protection =
2524                         DRM_MODE_CONTENT_PROTECTION_DESIRED;
2525
2526         /*
2527          * Nothing to do if the state didn't change, or if HDCP was activated
2528          * since the last commit, as long as the hdcp content type is unchanged.
2529          */
2530         if (old_cp == new_cp ||
2531             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2532              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2533                 if (old_state->hdcp_content_type ==
2534                                 new_state->hdcp_content_type)
2535                         return;
2536         }
2537
2538         crtc_state->mode_changed = true;
2539 }
2540
2541 /* Handles the CP_IRQ raised from the DP HDCP sink */
2542 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2543 {
2544         struct intel_hdcp *hdcp = &connector->hdcp;
2545
2546         if (!hdcp->shim)
2547                 return;
2548
2549         atomic_inc(&connector->hdcp.cp_irq_count);
2550         wake_up_all(&connector->hdcp.cp_irq_queue);
2551
2552         schedule_delayed_work(&hdcp->check_work, 0);
2553 }