1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <seanpaul@chromium.org>
8  * Ramalingam C <ramalingam.c@intel.com>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_regs.h"
27 #include "intel_pcode.h"
28
29 #define KEY_LOAD_TRIES  5
30 #define HDCP2_LC_RETRY_CNT                      3
31
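/*
 * Look up the DP MST VC payload ID (VCPI) carried by this connector's port.
 * HDMI and DP SST connectors have no MST port and always map to stream 0.
 */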
32 static int intel_conn_to_vcpi(struct intel_connector *connector)
33 {
34         struct drm_dp_mst_topology_mgr *mgr;
35         struct drm_dp_mst_atomic_payload *payload;
36         struct drm_dp_mst_topology_state *mst_state;
37         int vcpi = 0;
38
39         /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
40         if (!connector->port)
41                 return 0;
42         mgr = connector->port->mgr;
43
44         drm_modeset_lock(&mgr->base.lock, NULL);
45         mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
46         payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
47         if (drm_WARN_ON(mgr->dev, !payload))
48                 goto out;
49
50         vcpi = payload->vcpi;
51         if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
52                 vcpi = 0;
53                 goto out;
54         }
55 out:
56         drm_modeset_unlock(&mgr->base.lock);
57         return vcpi;
58 }
59
60 /*
61  * intel_hdcp_required_content_stream selects the highest common HDCP
62  * content_type for all streams in the DP MST topology, because the security
63  * firmware has no provision to mark the content_type of each stream
64  * separately: it marks all available streams with the content_type provided
65  * at the time of port authentication. This may prevent userspace from using
66  * type1 content on an HDCP 2.2 capable sink when other sinks in the DP MST
67  * topology are not HDCP 2.2 capable. Though it is not compulsory, the
68  * security firmware should change its policy to mark content_types per stream.
69  */
70 static int
71 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
72 {
73         struct drm_connector_list_iter conn_iter;
74         struct intel_digital_port *conn_dig_port;
75         struct intel_connector *connector;
76         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
77         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
78         bool enforce_type0 = false;
79         int k;
80
81         data->k = 0;
82
83         if (dig_port->hdcp_auth_status)
84                 return 0;
85
86         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
87         for_each_intel_connector_iter(connector, &conn_iter) {
88                 if (connector->base.status == connector_status_disconnected)
89                         continue;
90
91                 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
92                         continue;
93
94                 conn_dig_port = intel_attached_dig_port(connector);
95                 if (conn_dig_port != dig_port)
96                         continue;
97
98                 if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
99                         enforce_type0 = true;
100
101                 data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
102                 data->k++;
103
104                 /* if there is only one active stream */
105                 if (dig_port->dp.active_mst_links <= 1)
106                         break;
107         }
108         drm_connector_list_iter_end(&conn_iter);
109
110         if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
111                 return -EINVAL;
112
113         /*
114          * Apply common protection level across all streams in DP MST Topology.
115          * Use highest supported content type for all streams in DP MST Topology.
116          */
117         for (k = 0; k < data->k; k++)
118                 data->streams[k].stream_type =
119                         enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
120
121         return 0;
122 }
123
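/*
 * Populate hdcp_port_data with the stream(s) to authenticate: a single stream
 * carrying the connector's content type for non-MST links, or every active
 * stream in the MST topology via intel_hdcp_required_content_stream().
 */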
124 static int intel_hdcp_prepare_streams(struct intel_connector *connector)
125 {
126         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
127         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
128         struct intel_hdcp *hdcp = &connector->hdcp;
129         int ret;
130
131         if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
132                 data->k = 1;
133                 data->streams[0].stream_type = hdcp->content_type;
134         } else {
135                 ret = intel_hdcp_required_content_stream(dig_port);
136                 if (ret)
137                         return ret;
138         }
139
140         return 0;
141 }
142
143 static
144 bool intel_hdcp_is_ksv_valid(u8 *ksv)
145 {
146         int i, ones = 0;
147         /* KSV has 20 1's and 20 0's */
148         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
149                 ones += hweight8(ksv[i]);
150         if (ones != 20)
151                 return false;
152
153         return true;
154 }
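
/*
 * Illustrative example for the check above (made-up KSV, not from any spec):
 * the 40-bit KSV 0x23 0x45 0x67 0x89 0xab has 3 + 3 + 5 + 3 + 5 = 19 set
 * bits, so intel_hdcp_is_ksv_valid() rejects it; a valid KSV has exactly
 * 20 ones and 20 zeros.
 */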
155
156 static
157 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
158                                const struct intel_hdcp_shim *shim, u8 *bksv)
159 {
160         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
161         int ret, i, tries = 2;
162
163         /* HDCP spec states that we must retry the bksv if it is invalid */
164         for (i = 0; i < tries; i++) {
165                 ret = shim->read_bksv(dig_port, bksv);
166                 if (ret)
167                         return ret;
168                 if (intel_hdcp_is_ksv_valid(bksv))
169                         break;
170         }
171         if (i == tries) {
172                 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
173                 return -ENODEV;
174         }
175
176         return 0;
177 }
178
179 /* Is the platform and sink HDCP 1.4 capable? */
180 bool intel_hdcp_capable(struct intel_connector *connector)
181 {
182         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
183         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
184         bool capable = false;
185         u8 bksv[5];
186
187         if (!shim)
188                 return capable;
189
190         if (shim->hdcp_capable) {
191                 shim->hdcp_capable(dig_port, &capable);
192         } else {
193                 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
194                         capable = true;
195         }
196
197         return capable;
198 }
199
200 /* Is the platform and sink HDCP 2.2 capable? */
201 bool intel_hdcp2_capable(struct intel_connector *connector)
202 {
203         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
204         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
205         struct intel_hdcp *hdcp = &connector->hdcp;
206         bool capable = false;
207
208         /* I915 support for HDCP2.2 */
209         if (!hdcp->hdcp2_supported)
210                 return false;
211
212         /* The MEI component must be bound and ready */
213         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
214         if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) {
215                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
216                 return false;
217         }
218         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
219
220         /* Sink's capability for HDCP2.2 */
221         hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
222
223         return capable;
224 }
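
/*
 * Usage sketch (illustrative only, not an actual call site in this file):
 * callers are expected to gate enablement on these capability checks, trying
 * HDCP 2.2 first and falling back to HDCP 1.4, e.g.:
 *
 *	if (intel_hdcp2_capable(connector))
 *		ret = enable_hdcp2(connector);	// hypothetical helper
 *	else if (intel_hdcp_capable(connector))
 *		ret = enable_hdcp1(connector);	// hypothetical helper
 */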
225
226 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
227                               enum transcoder cpu_transcoder, enum port port)
228 {
229         return intel_de_read(dev_priv,
230                              HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
231                HDCP_STATUS_ENC;
232 }
233
234 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
235                                enum transcoder cpu_transcoder, enum port port)
236 {
237         return intel_de_read(dev_priv,
238                              HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
239                LINK_ENCRYPTION_STATUS;
240 }
241
242 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
243                                     const struct intel_hdcp_shim *shim)
244 {
245         int ret, read_ret;
246         bool ksv_ready;
247
248         /* Poll for ksv list ready (spec says max time allowed is 5s) */
249         ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
250                                                          &ksv_ready),
251                          read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
252                          100 * 1000);
253         if (ret)
254                 return ret;
255         if (read_ret)
256                 return read_ret;
257         if (!ksv_ready)
258                 return -ETIMEDOUT;
259
260         return 0;
261 }
262
263 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
264 {
265         enum i915_power_well_id id;
266         intel_wakeref_t wakeref;
267         bool enabled = false;
268
269         /*
270          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
271          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
272          */
273         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
274                 id = HSW_DISP_PW_GLOBAL;
275         else
276                 id = SKL_DISP_PW_1;
277
278         /* PG1 (power well #1) needs to be enabled */
279         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
280                 enabled = intel_display_power_well_is_enabled(dev_priv, id);
281
282         /*
283          * Another requirement for HDCP key loadability is an enabled cdclk
284          * PLL. We won't get here without an active crtc, so assume cdclk is
285          * already running.
286          */
287
288         return enabled;
289 }
290
291 static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
292 {
293         intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
294         intel_de_write(dev_priv, HDCP_KEY_STATUS,
295                        HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
296 }
297
298 static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
299 {
300         int ret;
301         u32 val;
302
303         val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
304         if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
305                 return 0;
306
307         /*
308          * On HSW and BDW, HW loads the HDCP1.4 key when the display comes
309          * out of reset. So if the key is not already loaded, it's an error state.
310          */
311         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
312                 if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
313                         return -ENXIO;
314
315         /*
316          * Initiate loading the HDCP key from fuses.
317          *
318          * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only
319          * display version 9 platforms (minus BXT) differ in the key load
320          * trigger process from other platforms: these platforms use the GT
321          * Driver Mailbox interface.
322          */
323         if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
324                 ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
325                 if (ret) {
326                         drm_err(&dev_priv->drm,
327                                 "Failed to initiate HDCP key load (%d)\n",
328                                 ret);
329                         return ret;
330                 }
331         } else {
332                 intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
333         }
334
335         /* Wait for the keys to load (500us) */
336         ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
337                                         HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
338                                         10, 1, &val);
339         if (ret)
340                 return ret;
341         else if (!(val & HDCP_KEY_LOAD_STATUS))
342                 return -ENXIO;
343
344         /* Send Aksv over to PCH display for use in authentication */
345         intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
346
347         return 0;
348 }
349
350 /* Write a 32-bit chunk of SHA text and wait for the hardware to be ready for more */
351 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
352 {
353         intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
354         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
355                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
356                 return -ETIMEDOUT;
357         }
358         return 0;
359 }
360
361 static
362 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
363                                 enum transcoder cpu_transcoder, enum port port)
364 {
365         if (DISPLAY_VER(dev_priv) >= 12) {
366                 switch (cpu_transcoder) {
367                 case TRANSCODER_A:
368                         return HDCP_TRANSA_REP_PRESENT |
369                                HDCP_TRANSA_SHA1_M0;
370                 case TRANSCODER_B:
371                         return HDCP_TRANSB_REP_PRESENT |
372                                HDCP_TRANSB_SHA1_M0;
373                 case TRANSCODER_C:
374                         return HDCP_TRANSC_REP_PRESENT |
375                                HDCP_TRANSC_SHA1_M0;
376                 case TRANSCODER_D:
377                         return HDCP_TRANSD_REP_PRESENT |
378                                HDCP_TRANSD_SHA1_M0;
379                 default:
380                         drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
381                                 cpu_transcoder);
382                         return -EINVAL;
383                 }
384         }
385
386         switch (port) {
387         case PORT_A:
388                 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
389         case PORT_B:
390                 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
391         case PORT_C:
392                 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
393         case PORT_D:
394                 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
395         case PORT_E:
396                 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
397         default:
398                 drm_err(&dev_priv->drm, "Unknown port %d\n", port);
399                 return -EINVAL;
400         }
401 }
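
/*
 * Note that the per-transcoder/port bits returned above are also used as the
 * mask that _intel_hdcp_disable() clears from HDCP_REP_CTL on teardown.
 */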
402
403 static
404 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
405                                 const struct intel_hdcp_shim *shim,
406                                 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
407 {
408         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
409         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
410         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
411         enum port port = dig_port->base.port;
412         u32 vprime, sha_text, sha_leftovers, rep_ctl;
413         int ret, i, j, sha_idx;
414
415         /* Process V' values from the receiver */
416         for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
417                 ret = shim->read_v_prime_part(dig_port, i, &vprime);
418                 if (ret)
419                         return ret;
420                 intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
421         }
422
423         /*
424          * We need to write the concatenation of all device KSVs, BINFO (DP) ||
425          * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
426          * stream is written via the HDCP_SHA_TEXT register in 32-bit
427          * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
428          * index will keep track of our progress through the 64 bytes as well as
429          * helping us work the 40-bit KSVs through our 32-bit register.
430          *
431          * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
432          */
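        /*
         * Worked example of the bookkeeping below: with 5-byte KSVs packed
         * into a 32-bit register, sha_leftovers cycles through 1, 2, 3 and
         * then 0 (a full flush) for every four KSVs consumed.
         */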
433         sha_idx = 0;
434         sha_text = 0;
435         sha_leftovers = 0;
436         rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
437         intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
438         for (i = 0; i < num_downstream; i++) {
439                 unsigned int sha_empty;
440                 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
441
442                 /* Fill up the empty slots in sha_text and write it out */
443                 sha_empty = sizeof(sha_text) - sha_leftovers;
444                 for (j = 0; j < sha_empty; j++) {
445                         u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
446                         sha_text |= ksv[j] << off;
447                 }
448
449                 ret = intel_write_sha_text(dev_priv, sha_text);
450                 if (ret < 0)
451                         return ret;
452
453                 /* The programming guide says to write this every 64 bytes */
454                 sha_idx += sizeof(sha_text);
455                 if (!(sha_idx % 64))
456                         intel_de_write(dev_priv, HDCP_REP_CTL,
457                                        rep_ctl | HDCP_SHA1_TEXT_32);
458
459                 /* Store the leftover bytes from the ksv in sha_text */
460                 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
461                 sha_text = 0;
462                 for (j = 0; j < sha_leftovers; j++)
463                         sha_text |= ksv[sha_empty + j] <<
464                                         ((sizeof(sha_text) - j - 1) * 8);
465
466                 /*
467                  * If we still have room in sha_text for more data, continue.
468                  * Otherwise, write it out immediately.
469                  */
470                 if (sizeof(sha_text) > sha_leftovers)
471                         continue;
472
473                 ret = intel_write_sha_text(dev_priv, sha_text);
474                 if (ret < 0)
475                         return ret;
476                 sha_leftovers = 0;
477                 sha_text = 0;
478                 sha_idx += sizeof(sha_text);
479         }
480
481         /*
482          * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
483          * bytes are leftover from the last ksv, we might be able to fit them
484          * all in sha_text (first 2 cases), or we might need to split them up
485          * into 2 writes (last 2 cases).
486          */
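        /*
         * Summary of the cases below (leftover bytes -> SHA writes):
         *   0: 16 bits text + 16 bits M0, then 32 bits M0, then 16 bits M0
         *   1: 24 bits text +  8 bits M0, then 32 bits M0, then 24 bits M0
         *   2: 32 bits text, then 64 bits M0, then a manual SHA-1 terminator
         *   3: 32 bits text, then 8 bits text + 24 bits M0, then 32 bits M0,
         *      then 8 bits M0
         */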
487         if (sha_leftovers == 0) {
488                 /* Write 16 bits of text, 16 bits of M0 */
489                 intel_de_write(dev_priv, HDCP_REP_CTL,
490                                rep_ctl | HDCP_SHA1_TEXT_16);
491                 ret = intel_write_sha_text(dev_priv,
492                                            bstatus[0] << 8 | bstatus[1]);
493                 if (ret < 0)
494                         return ret;
495                 sha_idx += sizeof(sha_text);
496
497                 /* Write 32 bits of M0 */
498                 intel_de_write(dev_priv, HDCP_REP_CTL,
499                                rep_ctl | HDCP_SHA1_TEXT_0);
500                 ret = intel_write_sha_text(dev_priv, 0);
501                 if (ret < 0)
502                         return ret;
503                 sha_idx += sizeof(sha_text);
504
505                 /* Write 16 bits of M0 */
506                 intel_de_write(dev_priv, HDCP_REP_CTL,
507                                rep_ctl | HDCP_SHA1_TEXT_16);
508                 ret = intel_write_sha_text(dev_priv, 0);
509                 if (ret < 0)
510                         return ret;
511                 sha_idx += sizeof(sha_text);
512
513         } else if (sha_leftovers == 1) {
514                 /* Write 24 bits of text, 8 bits of M0 */
515                 intel_de_write(dev_priv, HDCP_REP_CTL,
516                                rep_ctl | HDCP_SHA1_TEXT_24);
517                 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
518                 /* Only 24-bits of data, must be in the LSB */
519                 sha_text = (sha_text & 0xffffff00) >> 8;
520                 ret = intel_write_sha_text(dev_priv, sha_text);
521                 if (ret < 0)
522                         return ret;
523                 sha_idx += sizeof(sha_text);
524
525                 /* Write 32 bits of M0 */
526                 intel_de_write(dev_priv, HDCP_REP_CTL,
527                                rep_ctl | HDCP_SHA1_TEXT_0);
528                 ret = intel_write_sha_text(dev_priv, 0);
529                 if (ret < 0)
530                         return ret;
531                 sha_idx += sizeof(sha_text);
532
533                 /* Write 24 bits of M0 */
534                 intel_de_write(dev_priv, HDCP_REP_CTL,
535                                rep_ctl | HDCP_SHA1_TEXT_8);
536                 ret = intel_write_sha_text(dev_priv, 0);
537                 if (ret < 0)
538                         return ret;
539                 sha_idx += sizeof(sha_text);
540
541         } else if (sha_leftovers == 2) {
542                 /* Write 32 bits of text */
543                 intel_de_write(dev_priv, HDCP_REP_CTL,
544                                rep_ctl | HDCP_SHA1_TEXT_32);
545                 sha_text |= bstatus[0] << 8 | bstatus[1];
546                 ret = intel_write_sha_text(dev_priv, sha_text);
547                 if (ret < 0)
548                         return ret;
549                 sha_idx += sizeof(sha_text);
550
551                 /* Write 64 bits of M0 */
552                 intel_de_write(dev_priv, HDCP_REP_CTL,
553                                rep_ctl | HDCP_SHA1_TEXT_0);
554                 for (i = 0; i < 2; i++) {
555                         ret = intel_write_sha_text(dev_priv, 0);
556                         if (ret < 0)
557                                 return ret;
558                         sha_idx += sizeof(sha_text);
559                 }
560
561                 /*
562                  * Terminate the SHA-1 stream by hand. For the other leftover
563                  * cases this is appended by the hardware.
564                  */
565                 intel_de_write(dev_priv, HDCP_REP_CTL,
566                                rep_ctl | HDCP_SHA1_TEXT_32);
567                 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
568                 ret = intel_write_sha_text(dev_priv, sha_text);
569                 if (ret < 0)
570                         return ret;
571                 sha_idx += sizeof(sha_text);
572         } else if (sha_leftovers == 3) {
573                 /* Write 32 bits of text (filled from LSB) */
574                 intel_de_write(dev_priv, HDCP_REP_CTL,
575                                rep_ctl | HDCP_SHA1_TEXT_32);
576                 sha_text |= bstatus[0];
577                 ret = intel_write_sha_text(dev_priv, sha_text);
578                 if (ret < 0)
579                         return ret;
580                 sha_idx += sizeof(sha_text);
581
582                 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
583                 intel_de_write(dev_priv, HDCP_REP_CTL,
584                                rep_ctl | HDCP_SHA1_TEXT_8);
585                 ret = intel_write_sha_text(dev_priv, bstatus[1]);
586                 if (ret < 0)
587                         return ret;
588                 sha_idx += sizeof(sha_text);
589
590                 /* Write 32 bits of M0 */
591                 intel_de_write(dev_priv, HDCP_REP_CTL,
592                                rep_ctl | HDCP_SHA1_TEXT_0);
593                 ret = intel_write_sha_text(dev_priv, 0);
594                 if (ret < 0)
595                         return ret;
596                 sha_idx += sizeof(sha_text);
597
598                 /* Write 8 bits of M0 */
599                 intel_de_write(dev_priv, HDCP_REP_CTL,
600                                rep_ctl | HDCP_SHA1_TEXT_24);
601                 ret = intel_write_sha_text(dev_priv, 0);
602                 if (ret < 0)
603                         return ret;
604                 sha_idx += sizeof(sha_text);
605         } else {
606                 drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
607                             sha_leftovers);
608                 return -EINVAL;
609         }
610
611         intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
612         /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
613         while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
614                 ret = intel_write_sha_text(dev_priv, 0);
615                 if (ret < 0)
616                         return ret;
617                 sha_idx += sizeof(sha_text);
618         }
619
620         /*
621          * Last write gets the length of the concatenation in bits. That is:
622          *  - 5 bytes per device
623          *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
624          */
625         sha_text = (num_downstream * 5 + 10) * 8;
626         ret = intel_write_sha_text(dev_priv, sha_text);
627         if (ret < 0)
628                 return ret;
629
630         /* Tell the HW we're done with the hash and wait for it to ACK */
631         intel_de_write(dev_priv, HDCP_REP_CTL,
632                        rep_ctl | HDCP_SHA1_COMPLETE_HASH);
633         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
634                                   HDCP_SHA1_COMPLETE, 1)) {
635                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
636                 return -ETIMEDOUT;
637         }
638         if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
639                 drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
640                 return -ENXIO;
641         }
642
643         return 0;
644 }
645
646 /* Implements Part 2 of the HDCP authentication procedure */
647 static
648 int intel_hdcp_auth_downstream(struct intel_connector *connector)
649 {
650         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
651         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
652         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
653         u8 bstatus[2], num_downstream, *ksv_fifo;
654         int ret, i, tries = 3;
655
656         ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
657         if (ret) {
658                 drm_dbg_kms(&dev_priv->drm,
659                             "KSV list failed to become ready (%d)\n", ret);
660                 return ret;
661         }
662
663         ret = shim->read_bstatus(dig_port, bstatus);
664         if (ret)
665                 return ret;
666
667         if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
668             DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
669                 drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
670                 return -EPERM;
671         }
672
673         /*
674          * When a repeater reports a device count of 0, the HDCP 1.4 spec
675          * allows disabling HDCP encryption, which implies the repeater can't
676          * have its own display. As there is no consumption of encrypted
677          * content in a repeater with 0 downstream devices, fail the
678          * authentication.
679          */
680         num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
681         if (num_downstream == 0) {
682                 drm_dbg_kms(&dev_priv->drm,
683                             "Repeater with zero downstream devices\n");
684                 return -EINVAL;
685         }
686
687         ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
688         if (!ksv_fifo) {
689                 drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
690                 return -ENOMEM;
691         }
692
693         ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
694         if (ret)
695                 goto err;
696
697         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
698                                         num_downstream) > 0) {
699                 drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
700                 ret = -EPERM;
701                 goto err;
702         }
703
704         /*
705          * When V prime mismatches, the DP spec mandates re-reading
706          * V prime at least twice.
707          */
708         for (i = 0; i < tries; i++) {
709                 ret = intel_hdcp_validate_v_prime(connector, shim,
710                                                   ksv_fifo, num_downstream,
711                                                   bstatus);
712                 if (!ret)
713                         break;
714         }
715
716         if (i == tries) {
717                 drm_dbg_kms(&dev_priv->drm,
718                             "V Prime validation failed.(%d)\n", ret);
719                 goto err;
720         }
721
722         drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
723                     num_downstream);
724         ret = 0;
725 err:
726         kfree(ksv_fifo);
727         return ret;
728 }
729
730 /* Implements Part 1 of the HDCP authentication procedure */
731 static int intel_hdcp_auth(struct intel_connector *connector)
732 {
733         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
734         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
735         struct intel_hdcp *hdcp = &connector->hdcp;
736         const struct intel_hdcp_shim *shim = hdcp->shim;
737         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
738         enum port port = dig_port->base.port;
739         unsigned long r0_prime_gen_start;
740         int ret, i, tries = 2;
741         union {
742                 u32 reg[2];
743                 u8 shim[DRM_HDCP_AN_LEN];
744         } an;
745         union {
746                 u32 reg[2];
747                 u8 shim[DRM_HDCP_KSV_LEN];
748         } bksv;
749         union {
750                 u32 reg;
751                 u8 shim[DRM_HDCP_RI_LEN];
752         } ri;
753         bool repeater_present, hdcp_capable;
754
755         /*
756          * Detects whether the display is HDCP capable. Although we check for
757          * valid Bksv below, the HDCP over DP spec requires that we check
758          * whether the display supports HDCP before we write An. For HDMI
759          * displays, this is not necessary.
760          */
761         if (shim->hdcp_capable) {
762                 ret = shim->hdcp_capable(dig_port, &hdcp_capable);
763                 if (ret)
764                         return ret;
765                 if (!hdcp_capable) {
766                         drm_dbg_kms(&dev_priv->drm,
767                                     "Panel is not HDCP capable\n");
768                         return -EINVAL;
769                 }
770         }
771
772         /* Initialize An with 2 random values and acquire it */
773         for (i = 0; i < 2; i++)
774                 intel_de_write(dev_priv,
775                                HDCP_ANINIT(dev_priv, cpu_transcoder, port),
776                                get_random_u32());
777         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
778                        HDCP_CONF_CAPTURE_AN);
779
780         /* Wait for An to be acquired */
781         if (intel_de_wait_for_set(dev_priv,
782                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
783                                   HDCP_STATUS_AN_READY, 1)) {
784                 drm_err(&dev_priv->drm, "Timed out waiting for An\n");
785                 return -ETIMEDOUT;
786         }
787
788         an.reg[0] = intel_de_read(dev_priv,
789                                   HDCP_ANLO(dev_priv, cpu_transcoder, port));
790         an.reg[1] = intel_de_read(dev_priv,
791                                   HDCP_ANHI(dev_priv, cpu_transcoder, port));
792         ret = shim->write_an_aksv(dig_port, an.shim);
793         if (ret)
794                 return ret;
795
796         r0_prime_gen_start = jiffies;
797
798         memset(&bksv, 0, sizeof(bksv));
799
800         ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
801         if (ret < 0)
802                 return ret;
803
804         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
805                 drm_err(&dev_priv->drm, "BKSV is revoked\n");
806                 return -EPERM;
807         }
808
809         intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
810                        bksv.reg[0]);
811         intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
812                        bksv.reg[1]);
813
814         ret = shim->repeater_present(dig_port, &repeater_present);
815         if (ret)
816                 return ret;
817         if (repeater_present)
818                 intel_de_write(dev_priv, HDCP_REP_CTL,
819                                intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
820
821         ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
822         if (ret)
823                 return ret;
824
825         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
826                        HDCP_CONF_AUTH_AND_ENC);
827
828         /* Wait for R0 ready */
829         if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
830                      (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
831                 drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
832                 return -ETIMEDOUT;
833         }
834
835         /*
836          * Wait for R0' to become available. The spec says 100ms from Aksv, but
837          * some monitors can take longer than this. We'll set the timeout at
838          * 300ms just to be sure.
839          *
840          * On DP, there's an R0_READY bit available but no such bit
841          * exists on HDMI. Since the upper-bound is the same, we'll just do
842          * the stupid thing instead of polling on one and not the other.
843          */
844         wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
845
846         tries = 3;
847
848         /*
849          * The DP HDCP spec mandates two more attempts to read R0' in case
850          * of an R0 mismatch.
851          */
852         for (i = 0; i < tries; i++) {
853                 ri.reg = 0;
854                 ret = shim->read_ri_prime(dig_port, ri.shim);
855                 if (ret)
856                         return ret;
857                 intel_de_write(dev_priv,
858                                HDCP_RPRIME(dev_priv, cpu_transcoder, port),
859                                ri.reg);
860
861                 /* Wait for Ri prime match */
862                 if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
863                               (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
864                         break;
865         }
866
867         if (i == tries) {
868                 drm_dbg_kms(&dev_priv->drm,
869                             "Timed out waiting for Ri prime match (%x)\n",
870                             intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
871                                           cpu_transcoder, port)));
872                 return -ETIMEDOUT;
873         }
874
875         /* Wait for encryption confirmation */
876         if (intel_de_wait_for_set(dev_priv,
877                                   HDCP_STATUS(dev_priv, cpu_transcoder, port),
878                                   HDCP_STATUS_ENC,
879                                   HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
880                 drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
881                 return -ETIMEDOUT;
882         }
883
884         /* DP MST Auth Part 1 Step 2.a and Step 2.b */
885         if (shim->stream_encryption) {
886                 ret = shim->stream_encryption(connector, true);
887                 if (ret) {
888                         drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
889                                 connector->base.name, connector->base.base.id);
890                         return ret;
891                 }
892                 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
893                             transcoder_name(hdcp->stream_transcoder));
894         }
895
896         if (repeater_present)
897                 return intel_hdcp_auth_downstream(connector);
898
899         drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
900         return 0;
901 }
902
903 static int _intel_hdcp_disable(struct intel_connector *connector)
904 {
905         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
906         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
907         struct intel_hdcp *hdcp = &connector->hdcp;
908         enum port port = dig_port->base.port;
909         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
910         u32 repeater_ctl;
911         int ret;
912
913         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
914                     connector->base.name, connector->base.base.id);
915
916         if (hdcp->shim->stream_encryption) {
917                 ret = hdcp->shim->stream_encryption(connector, false);
918                 if (ret) {
919                         drm_err(&dev_priv->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
920                                 connector->base.name, connector->base.base.id);
921                         return ret;
922                 }
923                 drm_dbg_kms(&dev_priv->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
924                             transcoder_name(hdcp->stream_transcoder));
925                 /*
926                  * If there are other connectors on this port using HDCP,
927                  * don't disable port-level HDCP until encryption has been
928                  * disabled for all connectors in the MST topology.
929                  */
930                 if (dig_port->num_hdcp_streams > 0)
931                         return 0;
932         }
933
934         hdcp->hdcp_encrypted = false;
935         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
936         if (intel_de_wait_for_clear(dev_priv,
937                                     HDCP_STATUS(dev_priv, cpu_transcoder, port),
938                                     ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
939                 drm_err(&dev_priv->drm,
940                         "Failed to disable HDCP, timeout clearing status\n");
941                 return -ETIMEDOUT;
942         }
943
944         repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
945                                                    port);
946         intel_de_write(dev_priv, HDCP_REP_CTL,
947                        intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
948
949         ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
950         if (ret) {
951                 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
952                 return ret;
953         }
954
955         drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
956         return 0;
957 }
958
959 static int _intel_hdcp_enable(struct intel_connector *connector)
960 {
961         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
962         struct intel_hdcp *hdcp = &connector->hdcp;
963         int i, ret, tries = 3;
964
965         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
966                     connector->base.name, connector->base.base.id);
967
968         if (!hdcp_key_loadable(dev_priv)) {
969                 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
970                 return -ENXIO;
971         }
972
973         for (i = 0; i < KEY_LOAD_TRIES; i++) {
974                 ret = intel_hdcp_load_keys(dev_priv);
975                 if (!ret)
976                         break;
977                 intel_hdcp_clear_keys(dev_priv);
978         }
979         if (ret) {
980                 drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
981                         ret);
982                 return ret;
983         }
984
985         /* In case of authentication failures, the HDCP spec expects reauth. */
986         for (i = 0; i < tries; i++) {
987                 ret = intel_hdcp_auth(connector);
988                 if (!ret) {
989                         hdcp->hdcp_encrypted = true;
990                         return 0;
991                 }
992
993                 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
994
995                 /* Ensure HDCP encryption and signalling are stopped. */
996                 _intel_hdcp_disable(connector);
997         }
998
999         drm_dbg_kms(&dev_priv->drm,
1000                     "HDCP authentication failed (%d tries/%d)\n", tries, ret);
1001         return ret;
1002 }
1003
1004 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
1005 {
1006         return container_of(hdcp, struct intel_connector, hdcp);
1007 }
1008
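/*
 * Update the cached content protection value, adjust the port's HDCP stream
 * count to match, and optionally schedule the property worker so the new
 * value is reflected to userspace.
 */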
1009 static void intel_hdcp_update_value(struct intel_connector *connector,
1010                                     u64 value, bool update_property)
1011 {
1012         struct drm_device *dev = connector->base.dev;
1013         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1014         struct intel_hdcp *hdcp = &connector->hdcp;
1015
1016         drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
1017
1018         if (hdcp->value == value)
1019                 return;
1020
1021         drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
1022
1023         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1024                 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
1025                         dig_port->num_hdcp_streams--;
1026         } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
1027                 dig_port->num_hdcp_streams++;
1028         }
1029
1030         hdcp->value = value;
1031         if (update_property) {
1032                 drm_connector_get(&connector->base);
1033                 schedule_work(&hdcp->prop_work);
1034         }
1035 }
1036
1037 /* Implements Part 3 of the HDCP authentication procedure */
1038 static int intel_hdcp_check_link(struct intel_connector *connector)
1039 {
1040         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1041         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1042         struct intel_hdcp *hdcp = &connector->hdcp;
1043         enum port port = dig_port->base.port;
1044         enum transcoder cpu_transcoder;
1045         int ret = 0;
1046
1047         mutex_lock(&hdcp->mutex);
1048         mutex_lock(&dig_port->hdcp_mutex);
1049
1050         cpu_transcoder = hdcp->cpu_transcoder;
1051
1052         /* Check_link is valid only when HDCP 1.4 is enabled */
1053         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1054             !hdcp->hdcp_encrypted) {
1055                 ret = -EINVAL;
1056                 goto out;
1057         }
1058
1059         if (drm_WARN_ON(&dev_priv->drm,
1060                         !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
1061                 drm_err(&dev_priv->drm,
1062                         "%s:%d HDCP link stopped encryption,%x\n",
1063                         connector->base.name, connector->base.base.id,
1064                         intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
1065                 ret = -ENXIO;
1066                 intel_hdcp_update_value(connector,
1067                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1068                                         true);
1069                 goto out;
1070         }
1071
1072         if (hdcp->shim->check_link(dig_port, connector)) {
1073                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1074                         intel_hdcp_update_value(connector,
1075                                 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1076                 }
1077                 goto out;
1078         }
1079
1080         drm_dbg_kms(&dev_priv->drm,
1081                     "[%s:%d] HDCP link failed, retrying authentication\n",
1082                     connector->base.name, connector->base.base.id);
1083
1084         ret = _intel_hdcp_disable(connector);
1085         if (ret) {
1086                 drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
1087                 intel_hdcp_update_value(connector,
1088                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1089                                         true);
1090                 goto out;
1091         }
1092
1093         ret = _intel_hdcp_enable(connector);
1094         if (ret) {
1095                 drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
1096                 intel_hdcp_update_value(connector,
1097                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1098                                         true);
1099                 goto out;
1100         }
1101
1102 out:
1103         mutex_unlock(&dig_port->hdcp_mutex);
1104         mutex_unlock(&hdcp->mutex);
1105         return ret;
1106 }
1107
1108 static void intel_hdcp_prop_work(struct work_struct *work)
1109 {
1110         struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1111                                                prop_work);
1112         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1113         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1114
1115         drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
1116         mutex_lock(&hdcp->mutex);
1117
1118         /*
1119          * This worker is only used to flip between ENABLED/DESIRED. Either of
1120          * those to UNDESIRED is handled by core. If value == UNDESIRED,
1121          * we're running just after hdcp has been disabled, so just exit
1122          */
1123         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1124                 drm_hdcp_update_content_protection(&connector->base,
1125                                                    hdcp->value);
1126
1127         mutex_unlock(&hdcp->mutex);
1128         drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
1129
1130         drm_connector_put(&connector->base);
1131 }
1132
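/*
 * HDCP is only supported on ports with HDCP hardware; before display version
 * 12 it is further limited to ports A through D.
 */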
1133 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1134 {
1135         return RUNTIME_INFO(dev_priv)->has_hdcp &&
1136                 (DISPLAY_VER(dev_priv) >= 12 || port < PORT_E);
1137 }
1138
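/*
 * The hdcp2_* helpers below wrap the MEI component ops: each takes comp_mutex,
 * bails out with -EINVAL if the component isn't bound yet, and forwards the
 * HDCP 2.2 protocol message to the security firmware.
 */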
1139 static int
1140 hdcp2_prepare_ake_init(struct intel_connector *connector,
1141                        struct hdcp2_ake_init *ake_data)
1142 {
1143         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1144         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1145         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1146         struct i915_hdcp_comp_master *comp;
1147         int ret;
1148
1149         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1150         comp = dev_priv->display.hdcp.master;
1151
1152         if (!comp || !comp->ops) {
1153                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1154                 return -EINVAL;
1155         }
1156
1157         ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1158         if (ret)
1159                 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1160                             ret);
1161         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1162
1163         return ret;
1164 }
1165
1166 static int
1167 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1168                                 struct hdcp2_ake_send_cert *rx_cert,
1169                                 bool *paired,
1170                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
1171                                 size_t *msg_sz)
1172 {
1173         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1174         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1175         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1176         struct i915_hdcp_comp_master *comp;
1177         int ret;
1178
1179         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1180         comp = dev_priv->display.hdcp.master;
1181
1182         if (!comp || !comp->ops) {
1183                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1184                 return -EINVAL;
1185         }
1186
1187         ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1188                                                          rx_cert, paired,
1189                                                          ek_pub_km, msg_sz);
1190         if (ret < 0)
1191                 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1192                             ret);
1193         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1194
1195         return ret;
1196 }
1197
1198 static int hdcp2_verify_hprime(struct intel_connector *connector,
1199                                struct hdcp2_ake_send_hprime *rx_hprime)
1200 {
1201         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1202         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1203         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1204         struct i915_hdcp_comp_master *comp;
1205         int ret;
1206
1207         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1208         comp = dev_priv->display.hdcp.master;
1209
1210         if (!comp || !comp->ops) {
1211                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1212                 return -EINVAL;
1213         }
1214
1215         ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1216         if (ret < 0)
1217                 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1218         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1219
1220         return ret;
1221 }
1222
1223 static int
1224 hdcp2_store_pairing_info(struct intel_connector *connector,
1225                          struct hdcp2_ake_send_pairing_info *pairing_info)
1226 {
1227         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1228         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1229         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1230         struct i915_hdcp_comp_master *comp;
1231         int ret;
1232
1233         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1234         comp = dev_priv->display.hdcp.master;
1235
1236         if (!comp || !comp->ops) {
1237                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1238                 return -EINVAL;
1239         }
1240
1241         ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1242         if (ret < 0)
1243                 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1244                             ret);
1245         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1246
1247         return ret;
1248 }
1249
1250 static int
1251 hdcp2_prepare_lc_init(struct intel_connector *connector,
1252                       struct hdcp2_lc_init *lc_init)
1253 {
1254         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1255         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1256         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1257         struct i915_hdcp_comp_master *comp;
1258         int ret;
1259
1260         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1261         comp = dev_priv->display.hdcp.master;
1262
1263         if (!comp || !comp->ops) {
1264                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1265                 return -EINVAL;
1266         }
1267
1268         ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1269         if (ret < 0)
1270                 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1271                             ret);
1272         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1273
1274         return ret;
1275 }
1276
1277 static int
1278 hdcp2_verify_lprime(struct intel_connector *connector,
1279                     struct hdcp2_lc_send_lprime *rx_lprime)
1280 {
1281         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1282         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1283         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1284         struct i915_hdcp_comp_master *comp;
1285         int ret;
1286
1287         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1288         comp = dev_priv->display.hdcp.master;
1289
1290         if (!comp || !comp->ops) {
1291                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1292                 return -EINVAL;
1293         }
1294
1295         ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1296         if (ret < 0)
1297                 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1298                             ret);
1299         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1300
1301         return ret;
1302 }
1303
1304 static int hdcp2_prepare_skey(struct intel_connector *connector,
1305                               struct hdcp2_ske_send_eks *ske_data)
1306 {
1307         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1308         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1309         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1310         struct i915_hdcp_comp_master *comp;
1311         int ret;
1312
1313         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1314         comp = dev_priv->display.hdcp.master;
1315
1316         if (!comp || !comp->ops) {
1317                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1318                 return -EINVAL;
1319         }
1320
1321         ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1322         if (ret < 0)
1323                 drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1324                             ret);
1325         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1326
1327         return ret;
1328 }
1329
1330 static int
1331 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1332                                       struct hdcp2_rep_send_receiverid_list
1333                                                                 *rep_topology,
1334                                       struct hdcp2_rep_send_ack *rep_send_ack)
1335 {
1336         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1337         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1338         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1339         struct i915_hdcp_comp_master *comp;
1340         int ret;
1341
1342         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1343         comp = dev_priv->display.hdcp.master;
1344
1345         if (!comp || !comp->ops) {
1346                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1347                 return -EINVAL;
1348         }
1349
1350         ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1351                                                          rep_topology,
1352                                                          rep_send_ack);
1353         if (ret < 0)
1354                 drm_dbg_kms(&dev_priv->drm,
1355                             "Verify rep topology failed. %d\n", ret);
1356         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1357
1358         return ret;
1359 }
1360
1361 static int
1362 hdcp2_verify_mprime(struct intel_connector *connector,
1363                     struct hdcp2_rep_stream_ready *stream_ready)
1364 {
1365         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1366         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1367         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1368         struct i915_hdcp_comp_master *comp;
1369         int ret;
1370
1371         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1372         comp = dev_priv->display.hdcp.master;
1373
1374         if (!comp || !comp->ops) {
1375                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1376                 return -EINVAL;
1377         }
1378
1379         ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1380         if (ret < 0)
1381                 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1382         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1383
1384         return ret;
1385 }
1386
1387 static int hdcp2_authenticate_port(struct intel_connector *connector)
1388 {
1389         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1390         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1391         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1392         struct i915_hdcp_comp_master *comp;
1393         int ret;
1394
1395         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1396         comp = dev_priv->display.hdcp.master;
1397
1398         if (!comp || !comp->ops) {
1399                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1400                 return -EINVAL;
1401         }
1402
1403         ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1404         if (ret < 0)
1405                 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1406                             ret);
1407         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1408
1409         return ret;
1410 }
1411
1412 static int hdcp2_close_mei_session(struct intel_connector *connector)
1413 {
1414         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1415         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1416         struct i915_hdcp_comp_master *comp;
1417         int ret;
1418
1419         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
1420         comp = dev_priv->display.hdcp.master;
1421
1422         if (!comp || !comp->ops) {
1423                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1424                 return -EINVAL;
1425         }
1426
1427         ret = comp->ops->close_hdcp_session(comp->mei_dev,
1428                                              &dig_port->hdcp_port_data);
1429         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
1430
1431         return ret;
1432 }
1433
1434 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1435 {
1436         return hdcp2_close_mei_session(connector);
1437 }
1438
1439 /* Authentication flow starts from here */
1440 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1441 {
1442         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1443         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1444         struct intel_hdcp *hdcp = &connector->hdcp;
1445         union {
1446                 struct hdcp2_ake_init ake_init;
1447                 struct hdcp2_ake_send_cert send_cert;
1448                 struct hdcp2_ake_no_stored_km no_stored_km;
1449                 struct hdcp2_ake_send_hprime send_hprime;
1450                 struct hdcp2_ake_send_pairing_info pairing_info;
1451         } msgs;
1452         const struct intel_hdcp_shim *shim = hdcp->shim;
1453         size_t size;
1454         int ret;
1455
1456         /* Init for seq_num */
1457         hdcp->seq_num_v = 0;
1458         hdcp->seq_num_m = 0;
1459
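             /*
              * AKE message flow: AKE_Init -> AKE_Send_Cert ->
              * (AKE_No_Stored_km or AKE_Stored_km) -> AKE_Send_H_prime ->
              * AKE_Send_Pairing_Info (only when pairing is still needed).
              */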
1460         ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1461         if (ret < 0)
1462                 return ret;
1463
1464         ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
1465                                   sizeof(msgs.ake_init));
1466         if (ret < 0)
1467                 return ret;
1468
1469         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
1470                                  &msgs.send_cert, sizeof(msgs.send_cert));
1471         if (ret < 0)
1472                 return ret;
1473
1474         if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1475                 drm_dbg_kms(&dev_priv->drm, "cert.rx_caps don't claim HDCP2.2\n");
1476                 return -EINVAL;
1477         }
1478
1479         hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1480
1481         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1482                                         msgs.send_cert.cert_rx.receiver_id,
1483                                         1) > 0) {
1484                 drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
1485                 return -EPERM;
1486         }
1487
1488         /*
1489          * msgs.no_stored_km also holds the AKE_Stored_km message when km is
1490          * already stored, i.e. the receiver is paired.
1491          */
1492         ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1493                                               &hdcp->is_paired,
1494                                               &msgs.no_stored_km, &size);
1495         if (ret < 0)
1496                 return ret;
1497
1498         ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
1499         if (ret < 0)
1500                 return ret;
1501
1502         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1503                                  &msgs.send_hprime, sizeof(msgs.send_hprime));
1504         if (ret < 0)
1505                 return ret;
1506
1507         ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1508         if (ret < 0)
1509                 return ret;
1510
1511         if (!hdcp->is_paired) {
1512                 /* Pairing is required */
1513                 ret = shim->read_2_2_msg(dig_port,
1514                                          HDCP_2_2_AKE_SEND_PAIRING_INFO,
1515                                          &msgs.pairing_info,
1516                                          sizeof(msgs.pairing_info));
1517                 if (ret < 0)
1518                         return ret;
1519
1520                 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1521                 if (ret < 0)
1522                         return ret;
1523                 hdcp->is_paired = true;
1524         }
1525
1526         return 0;
1527 }
1528
1529 static int hdcp2_locality_check(struct intel_connector *connector)
1530 {
1531         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1532         struct intel_hdcp *hdcp = &connector->hdcp;
1533         union {
1534                 struct hdcp2_lc_init lc_init;
1535                 struct hdcp2_lc_send_lprime send_lprime;
1536         } msgs;
1537         const struct intel_hdcp_shim *shim = hdcp->shim;
1538         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1539
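             /*
              * LC_Init / LC_Send_L_prime exchange; L' verification by the
              * firmware is retried up to HDCP2_LC_RETRY_CNT times.
              */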
1540         for (i = 0; i < tries; i++) {
1541                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1542                 if (ret < 0)
1543                         continue;
1544
1545                 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1546                                           sizeof(msgs.lc_init));
1547                 if (ret < 0)
1548                         continue;
1549
1550                 ret = shim->read_2_2_msg(dig_port,
1551                                          HDCP_2_2_LC_SEND_LPRIME,
1552                                          &msgs.send_lprime,
1553                                          sizeof(msgs.send_lprime));
1554                 if (ret < 0)
1555                         continue;
1556
1557                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1558                 if (!ret)
1559                         break;
1560         }
1561
1562         return ret;
1563 }
1564
1565 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1566 {
1567         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1568         struct intel_hdcp *hdcp = &connector->hdcp;
1569         struct hdcp2_ske_send_eks send_eks;
1570         int ret;
1571
1572         ret = hdcp2_prepare_skey(connector, &send_eks);
1573         if (ret < 0)
1574                 return ret;
1575
1576         ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1577                                         sizeof(send_eks));
1578         if (ret < 0)
1579                 return ret;
1580
1581         return 0;
1582 }
1583
1584 static
1585 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1586 {
1587         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1588         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1589         struct intel_hdcp *hdcp = &connector->hdcp;
1590         union {
1591                 struct hdcp2_rep_stream_manage stream_manage;
1592                 struct hdcp2_rep_stream_ready stream_ready;
1593         } msgs;
1594         const struct intel_hdcp_shim *shim = hdcp->shim;
1595         int ret, streams_size_delta, i;
1596
1597         if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1598                 return -ERANGE;
1599
1600         /* Prepare RepeaterAuth_Stream_Manage msg */
1601         msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1602         drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1603
1604         msgs.stream_manage.k = cpu_to_be16(data->k);
1605
1606         for (i = 0; i < data->k; i++) {
1607                 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1608                 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1609         }
1610
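             /*
              * Only data->k streams are populated; trim the unused tail of the
              * fixed-size streams[] array from the message.
              */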
1611         streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1612                                 sizeof(struct hdcp2_streamid_type);
1613         /* Send it to Repeater */
1614         ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1615                                   sizeof(msgs.stream_manage) - streams_size_delta);
1616         if (ret < 0)
1617                 goto out;
1618
1619         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1620                                  &msgs.stream_ready, sizeof(msgs.stream_ready));
1621         if (ret < 0)
1622                 goto out;
1623
1624         data->seq_num_m = hdcp->seq_num_m;
1625
1626         ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1627
1628 out:
1629         hdcp->seq_num_m++;
1630
1631         return ret;
1632 }
1633
1634 static
1635 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1636 {
1637         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1638         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1639         struct intel_hdcp *hdcp = &connector->hdcp;
1640         union {
1641                 struct hdcp2_rep_send_receiverid_list recvid_list;
1642                 struct hdcp2_rep_send_ack rep_ack;
1643         } msgs;
1644         const struct intel_hdcp_shim *shim = hdcp->shim;
1645         u32 seq_num_v, device_cnt;
1646         u8 *rx_info;
1647         int ret;
1648
1649         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1650                                  &msgs.recvid_list, sizeof(msgs.recvid_list));
1651         if (ret < 0)
1652                 return ret;
1653
1654         rx_info = msgs.recvid_list.rx_info;
1655
1656         if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1657             HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1658                 drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
1659                 return -EINVAL;
1660         }
1661
1662         /*
1663          * MST topology is not Type 1 capable if it contains a downstream
1664          * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
1665          */
1666         dig_port->hdcp_mst_type1_capable =
1667                 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
1668                 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
1669
1670         /* Convert the 24-bit big-endian seq_num_v and store it in a local u32 */
1671         seq_num_v =
1672                 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1673
1674         if (!hdcp->hdcp2_encrypted && seq_num_v) {
1675                 drm_dbg_kms(&dev_priv->drm,
1676                             "Non zero Seq_num_v at first RecvId_List msg\n");
1677                 return -EINVAL;
1678         }
1679
1680         if (seq_num_v < hdcp->seq_num_v) {
1681                 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1682                 drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
1683                 return -EINVAL;
1684         }
1685
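             /* The device count straddles the two RxInfo bytes. */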
1686         device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1687                       HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1688         if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
1689                                         msgs.recvid_list.receiver_ids,
1690                                         device_cnt) > 0) {
1691                 drm_err(&dev_priv->drm, "Revoked receiver ID(s) in the list\n");
1692                 return -EPERM;
1693         }
1694
1695         ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1696                                                     &msgs.recvid_list,
1697                                                     &msgs.rep_ack);
1698         if (ret < 0)
1699                 return ret;
1700
1701         hdcp->seq_num_v = seq_num_v;
1702         ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
1703                                   sizeof(msgs.rep_ack));
1704         if (ret < 0)
1705                 return ret;
1706
1707         return 0;
1708 }
1709
1710 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1711 {
1712         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1713         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1714         struct intel_hdcp *hdcp = &connector->hdcp;
1715         const struct intel_hdcp_shim *shim = hdcp->shim;
1716         int ret;
1717
1718         ret = hdcp2_authentication_key_exchange(connector);
1719         if (ret < 0) {
1720                 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1721                 return ret;
1722         }
1723
1724         ret = hdcp2_locality_check(connector);
1725         if (ret < 0) {
1726                 drm_dbg_kms(&i915->drm,
1727                             "Locality Check failed. Err : %d\n", ret);
1728                 return ret;
1729         }
1730
1731         ret = hdcp2_session_key_exchange(connector);
1732         if (ret < 0) {
1733                 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1734                 return ret;
1735         }
1736
1737         if (shim->config_stream_type) {
1738                 ret = shim->config_stream_type(dig_port,
1739                                                hdcp->is_repeater,
1740                                                hdcp->content_type);
1741                 if (ret < 0)
1742                         return ret;
1743         }
1744
1745         if (hdcp->is_repeater) {
1746                 ret = hdcp2_authenticate_repeater_topology(connector);
1747                 if (ret < 0) {
1748                         drm_dbg_kms(&i915->drm,
1749                                     "Repeater Auth Failed. Err: %d\n", ret);
1750                         return ret;
1751                 }
1752         }
1753
1754         return ret;
1755 }
1756
1757 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1758 {
1759         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1760         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1761         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1762         struct intel_hdcp *hdcp = &connector->hdcp;
1763         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1764         enum port port = dig_port->base.port;
1765         int ret = 0;
1766
1767         if (!(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1768                             LINK_ENCRYPTION_STATUS)) {
1769                 drm_err(&dev_priv->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1770                         connector->base.name, connector->base.base.id);
1771                 ret = -EPERM;
1772                 goto link_recover;
1773         }
1774
1775         if (hdcp->shim->stream_2_2_encryption) {
1776                 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1777                 if (ret) {
1778                         drm_err(&dev_priv->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1779                                 connector->base.name, connector->base.base.id);
1780                         return ret;
1781                 }
1782                 drm_dbg_kms(&dev_priv->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1783                             transcoder_name(hdcp->stream_transcoder));
1784         }
1785
1786         return 0;
1787
1788 link_recover:
1789         if (hdcp2_deauthenticate_port(connector) < 0)
1790                 drm_dbg_kms(&dev_priv->drm, "Port deauth failed.\n");
1791
1792         dig_port->hdcp_auth_status = false;
1793         data->k = 0;
1794
1795         return ret;
1796 }
1797
1798 static int hdcp2_enable_encryption(struct intel_connector *connector)
1799 {
1800         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1801         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1802         struct intel_hdcp *hdcp = &connector->hdcp;
1803         enum port port = dig_port->base.port;
1804         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1805         int ret;
1806
1807         drm_WARN_ON(&dev_priv->drm,
1808                     intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1809                     LINK_ENCRYPTION_STATUS);
1810         if (hdcp->shim->toggle_signalling) {
1811                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1812                                                     true);
1813                 if (ret) {
1814                         drm_err(&dev_priv->drm,
1815                                 "Failed to enable HDCP signalling. %d\n",
1816                                 ret);
1817                         return ret;
1818                 }
1819         }
1820
1821         if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1822             LINK_AUTH_STATUS) {
1823                 /* Link is Authenticated. Now set for Encryption */
1824                 intel_de_write(dev_priv,
1825                                HDCP2_CTL(dev_priv, cpu_transcoder, port),
1826                                intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
1827         }
1828
1829         ret = intel_de_wait_for_set(dev_priv,
1830                                     HDCP2_STATUS(dev_priv, cpu_transcoder,
1831                                                  port),
1832                                     LINK_ENCRYPTION_STATUS,
1833                                     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1834         dig_port->hdcp_auth_status = true;
1835
1836         return ret;
1837 }
1838
1839 static int hdcp2_disable_encryption(struct intel_connector *connector)
1840 {
1841         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1842         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1843         struct intel_hdcp *hdcp = &connector->hdcp;
1844         enum port port = dig_port->base.port;
1845         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1846         int ret;
1847
1848         drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
1849                                       LINK_ENCRYPTION_STATUS));
1850
1851         intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
1852                        intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
1853
1854         ret = intel_de_wait_for_clear(dev_priv,
1855                                       HDCP2_STATUS(dev_priv, cpu_transcoder,
1856                                                    port),
1857                                       LINK_ENCRYPTION_STATUS,
1858                                       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1859         if (ret == -ETIMEDOUT)
1860                 drm_dbg_kms(&dev_priv->drm, "Disable Encryption timed out\n");
1861
1862         if (hdcp->shim->toggle_signalling) {
1863                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1864                                                     false);
1865                 if (ret) {
1866                         drm_err(&dev_priv->drm,
1867                                 "Failed to disable HDCP signalling. %d\n",
1868                                 ret);
1869                         return ret;
1870                 }
1871         }
1872
1873         return ret;
1874 }
1875
1876 static int
1877 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1878 {
1879         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1880         int i, tries = 3, ret;
1881
1882         if (!connector->hdcp.is_repeater)
1883                 return 0;
1884
1885         for (i = 0; i < tries; i++) {
1886                 ret = _hdcp2_propagate_stream_management_info(connector);
1887                 if (!ret)
1888                         break;
1889
1890                 /* Let's restart the auth in case of seq_num_m roll over */
1891                 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1892                         drm_dbg_kms(&i915->drm,
1893                                     "seq_num_m roll over.(%d)\n", ret);
1894                         break;
1895                 }
1896
1897                 drm_dbg_kms(&i915->drm,
1898                             "HDCP2 stream management %d of %d Failed.(%d)\n",
1899                             i + 1, tries, ret);
1900         }
1901
1902         return ret;
1903 }
1904
1905 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1906 {
1907         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1908         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1909         int ret = 0, i, tries = 3;
1910
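             /*
              * Up to three full authentication attempts; skipped when the port
              * is already authenticated, e.g. by another stream on the same
              * MST port.
              */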
1911         for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1912                 ret = hdcp2_authenticate_sink(connector);
1913                 if (!ret) {
1914                         ret = intel_hdcp_prepare_streams(connector);
1915                         if (ret) {
1916                                 drm_dbg_kms(&i915->drm,
1917                                             "Prepare streams failed.(%d)\n",
1918                                             ret);
1919                                 break;
1920                         }
1921
1922                         ret = hdcp2_propagate_stream_management_info(connector);
1923                         if (ret) {
1924                                 drm_dbg_kms(&i915->drm,
1925                                             "Stream management failed.(%d)\n",
1926                                             ret);
1927                                 break;
1928                         }
1929
1930                         ret = hdcp2_authenticate_port(connector);
1931                         if (!ret)
1932                                 break;
1933                         drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1934                                     ret);
1935                 }
1936
1937                 /* Clearing the mei hdcp session */
1938                 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1939                             i + 1, tries, ret);
1940                 if (hdcp2_deauthenticate_port(connector) < 0)
1941                         drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1942         }
1943
1944         if (!ret && !dig_port->hdcp_auth_status) {
1945                 /*
1946                  * Ensure the required minimum 200ms interval between
1947                  * Session Key Exchange and enabling encryption.
1948                  */
1949                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1950                 ret = hdcp2_enable_encryption(connector);
1951                 if (ret < 0) {
1952                         drm_dbg_kms(&i915->drm,
1953                                     "Encryption Enable Failed.(%d)\n", ret);
1954                         if (hdcp2_deauthenticate_port(connector) < 0)
1955                                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1956                 }
1957         }
1958
1959         if (!ret)
1960                 ret = hdcp2_enable_stream_encryption(connector);
1961
1962         return ret;
1963 }
1964
1965 static int _intel_hdcp2_enable(struct intel_connector *connector)
1966 {
1967         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1968         struct intel_hdcp *hdcp = &connector->hdcp;
1969         int ret;
1970
1971         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1972                     connector->base.name, connector->base.base.id,
1973                     hdcp->content_type);
1974
1975         ret = hdcp2_authenticate_and_encrypt(connector);
1976         if (ret) {
1977                 drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1978                             hdcp->content_type, ret);
1979                 return ret;
1980         }
1981
1982         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1983                     connector->base.name, connector->base.base.id,
1984                     hdcp->content_type);
1985
1986         hdcp->hdcp2_encrypted = true;
1987         return 0;
1988 }
1989
1990 static int
1991 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1992 {
1993         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1994         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1995         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1996         struct intel_hdcp *hdcp = &connector->hdcp;
1997         int ret;
1998
1999         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
2000                     connector->base.name, connector->base.base.id);
2001
2002         if (hdcp->shim->stream_2_2_encryption) {
2003                 ret = hdcp->shim->stream_2_2_encryption(connector, false);
2004                 if (ret) {
2005                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
2006                                 connector->base.name, connector->base.base.id);
2007                         return ret;
2008                 }
2009                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
2010                             transcoder_name(hdcp->stream_transcoder));
2011
2012                 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
2013                         return 0;
2014         }
2015
2016         ret = hdcp2_disable_encryption(connector);
2017
2018         if (hdcp2_deauthenticate_port(connector) < 0)
2019                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
2020
2021         connector->hdcp.hdcp2_encrypted = false;
2022         dig_port->hdcp_auth_status = false;
2023         data->k = 0;
2024
2025         return ret;
2026 }
2027
2028 /* Implements the Link Integrity Check for HDCP2.2 */
2029 static int intel_hdcp2_check_link(struct intel_connector *connector)
2030 {
2031         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2032         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2033         struct intel_hdcp *hdcp = &connector->hdcp;
2034         enum port port = dig_port->base.port;
2035         enum transcoder cpu_transcoder;
2036         int ret = 0;
2037
2038         mutex_lock(&hdcp->mutex);
2039         mutex_lock(&dig_port->hdcp_mutex);
2040         cpu_transcoder = hdcp->cpu_transcoder;
2041
2042         /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2043         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2044             !hdcp->hdcp2_encrypted) {
2045                 ret = -EINVAL;
2046                 goto out;
2047         }
2048
2049         if (drm_WARN_ON(&dev_priv->drm,
2050                         !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
2051                 drm_err(&dev_priv->drm,
2052                         "HDCP2.2 link stopped the encryption, %x\n",
2053                         intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
2054                 ret = -ENXIO;
2055                 _intel_hdcp2_disable(connector, true);
2056                 intel_hdcp_update_value(connector,
2057                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2058                                         true);
2059                 goto out;
2060         }
2061
2062         ret = hdcp->shim->check_2_2_link(dig_port, connector);
2063         if (ret == HDCP_LINK_PROTECTED) {
2064                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2065                         intel_hdcp_update_value(connector,
2066                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2067                                         true);
2068                 }
2069                 goto out;
2070         }
2071
2072         if (ret == HDCP_TOPOLOGY_CHANGE) {
2073                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2074                         goto out;
2075
2076                 drm_dbg_kms(&dev_priv->drm,
2077                             "HDCP2.2 Downstream topology change\n");
2078                 ret = hdcp2_authenticate_repeater_topology(connector);
2079                 if (!ret) {
2080                         intel_hdcp_update_value(connector,
2081                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2082                                         true);
2083                         goto out;
2084                 }
2085                 drm_dbg_kms(&dev_priv->drm,
2086                             "[%s:%d] Repeater topology auth failed.(%d)\n",
2087                             connector->base.name, connector->base.base.id,
2088                             ret);
2089         } else {
2090                 drm_dbg_kms(&dev_priv->drm,
2091                             "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2092                             connector->base.name, connector->base.base.id);
2093         }
2094
2095         ret = _intel_hdcp2_disable(connector, true);
2096         if (ret) {
2097                 drm_err(&dev_priv->drm,
2098                         "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2099                         connector->base.name, connector->base.base.id, ret);
2100                 intel_hdcp_update_value(connector,
2101                                 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2102                 goto out;
2103         }
2104
2105         ret = _intel_hdcp2_enable(connector);
2106         if (ret) {
2107                 drm_dbg_kms(&dev_priv->drm,
2108                             "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2109                             connector->base.name, connector->base.base.id,
2110                             ret);
2111                 intel_hdcp_update_value(connector,
2112                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2113                                         true);
2114                 goto out;
2115         }
2116
2117 out:
2118         mutex_unlock(&dig_port->hdcp_mutex);
2119         mutex_unlock(&hdcp->mutex);
2120         return ret;
2121 }
2122
2123 static void intel_hdcp_check_work(struct work_struct *work)
2124 {
2125         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2126                                                struct intel_hdcp,
2127                                                check_work);
2128         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2129
2130         if (drm_connector_is_unregistered(&connector->base))
2131                 return;
2132
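             /*
              * Reschedule at the HDCP2.2 cadence while the 2.2 link check
              * passes, otherwise fall back to the HDCP1.4 check and its cadence.
              */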
2133         if (!intel_hdcp2_check_link(connector))
2134                 schedule_delayed_work(&hdcp->check_work,
2135                                       DRM_HDCP2_CHECK_PERIOD_MS);
2136         else if (!intel_hdcp_check_link(connector))
2137                 schedule_delayed_work(&hdcp->check_work,
2138                                       DRM_HDCP_CHECK_PERIOD_MS);
2139 }
2140
2141 static int i915_hdcp_component_bind(struct device *i915_kdev,
2142                                     struct device *mei_kdev, void *data)
2143 {
2144         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2145
2146         drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
2147         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2148         dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data;
2149         dev_priv->display.hdcp.master->mei_dev = mei_kdev;
2150         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2151
2152         return 0;
2153 }
2154
2155 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2156                                        struct device *mei_kdev, void *data)
2157 {
2158         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
2159
2160         drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
2161         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2162         dev_priv->display.hdcp.master = NULL;
2163         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2164 }
2165
2166 static const struct component_ops i915_hdcp_component_ops = {
2167         .bind   = i915_hdcp_component_bind,
2168         .unbind = i915_hdcp_component_unbind,
2169 };
2170
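     /*
      * For DDI B..F the ME FW index matches the i915 port enum value, hence
      * the direct cast below; DDI A uses a dedicated firmware index.
      */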
2171 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
2172 {
2173         switch (port) {
2174         case PORT_A:
2175                 return MEI_DDI_A;
2176         case PORT_B ... PORT_F:
2177                 return (enum mei_fw_ddi)port;
2178         default:
2179                 return MEI_DDI_INVALID_PORT;
2180         }
2181 }
2182
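     /*
      * ME FW transcoder indices for transcoders A..D are the i915 transcoder
      * value offset by 0x10.
      */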
2183 static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
2184 {
2185         switch (cpu_transcoder) {
2186         case TRANSCODER_A ... TRANSCODER_D:
2187                 return (enum mei_fw_tc)(cpu_transcoder | 0x10);
2188         default: /* eDP and DSI transcoders are not HDCP capable */
2189                 return MEI_INVALID_TRANSCODER;
2190         }
2191 }
2192
2193 static int initialize_hdcp_port_data(struct intel_connector *connector,
2194                                      struct intel_digital_port *dig_port,
2195                                      const struct intel_hdcp_shim *shim)
2196 {
2197         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2198         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2199         struct intel_hdcp *hdcp = &connector->hdcp;
2200         enum port port = dig_port->base.port;
2201
2202         if (DISPLAY_VER(dev_priv) < 12)
2203                 data->fw_ddi = intel_get_mei_fw_ddi_index(port);
2204         else
2205                 /*
2206                  * As per the ME FW API expectation, for GEN 12+ fw_ddi is
2207                  * filled with zero (invalid port index).
2208                  */
2209                 data->fw_ddi = MEI_DDI_INVALID_PORT;
2210
2211         /*
2212          * The associated transcoder is set and modified at modeset, so fw_tc
2213          * is initialized here to zero (invalid transcoder index). On platforms
2214          * older than Gen12 it stays that way.
2215          */
2216         data->fw_tc = MEI_INVALID_TRANSCODER;
2217
2218         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2219         data->protocol = (u8)shim->protocol;
2220
2221         if (!data->streams)
2222                 data->streams = kcalloc(INTEL_NUM_PIPES(dev_priv),
2223                                         sizeof(struct hdcp2_streamid_type),
2224                                         GFP_KERNEL);
2225         if (!data->streams) {
2226                 drm_err(&dev_priv->drm, "Out of Memory\n");
2227                 return -ENOMEM;
2228         }
2229         /* For SST */
2230         data->streams[0].stream_id = 0;
2231         data->streams[0].stream_type = hdcp->content_type;
2232
2233         return 0;
2234 }
2235
2236 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2237 {
2238         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2239                 return false;
2240
2241         return (DISPLAY_VER(dev_priv) >= 10 ||
2242                 IS_KABYLAKE(dev_priv) ||
2243                 IS_COFFEELAKE(dev_priv) ||
2244                 IS_COMETLAKE(dev_priv));
2245 }
2246
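     /*
      * Register the typed component that the MEI HDCP driver binds to; the
      * bind callback above stores the firmware ops and mei device for later use.
      */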
2247 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2248 {
2249         int ret;
2250
2251         if (!is_hdcp2_supported(dev_priv))
2252                 return;
2253
2254         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2255         drm_WARN_ON(&dev_priv->drm, dev_priv->display.hdcp.comp_added);
2256
2257         dev_priv->display.hdcp.comp_added = true;
2258         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2259         ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2260                                   I915_COMPONENT_HDCP);
2261         if (ret < 0) {
2262                 drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2263                             ret);
2264                 mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2265                 dev_priv->display.hdcp.comp_added = false;
2266                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2267                 return;
2268         }
2269 }
2270
2271 static void intel_hdcp2_init(struct intel_connector *connector,
2272                              struct intel_digital_port *dig_port,
2273                              const struct intel_hdcp_shim *shim)
2274 {
2275         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2276         struct intel_hdcp *hdcp = &connector->hdcp;
2277         int ret;
2278
2279         ret = initialize_hdcp_port_data(connector, dig_port, shim);
2280         if (ret) {
2281                 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2282                 return;
2283         }
2284
2285         hdcp->hdcp2_supported = true;
2286 }
2287
2288 int intel_hdcp_init(struct intel_connector *connector,
2289                     struct intel_digital_port *dig_port,
2290                     const struct intel_hdcp_shim *shim)
2291 {
2292         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2293         struct intel_hdcp *hdcp = &connector->hdcp;
2294         int ret;
2295
2296         if (!shim)
2297                 return -EINVAL;
2298
2299         if (is_hdcp2_supported(dev_priv))
2300                 intel_hdcp2_init(connector, dig_port, shim);
2301
2302         ret =
2303         drm_connector_attach_content_protection_property(&connector->base,
2304                                                          hdcp->hdcp2_supported);
2305         if (ret) {
2306                 hdcp->hdcp2_supported = false;
2307                 kfree(dig_port->hdcp_port_data.streams);
2308                 return ret;
2309         }
2310
2311         hdcp->shim = shim;
2312         mutex_init(&hdcp->mutex);
2313         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2314         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2315         init_waitqueue_head(&hdcp->cp_irq_queue);
2316
2317         return 0;
2318 }
2319
2320 int intel_hdcp_enable(struct intel_connector *connector,
2321                       const struct intel_crtc_state *pipe_config, u8 content_type)
2322 {
2323         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2324         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2325         struct intel_hdcp *hdcp = &connector->hdcp;
2326         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2327         int ret = -EINVAL;
2328
2329         if (!hdcp->shim)
2330                 return -ENOENT;
2331
2332         if (!connector->encoder) {
2333                 drm_err(&dev_priv->drm, "[%s:%d] encoder is not initialized\n",
2334                         connector->base.name, connector->base.base.id);
2335                 return -ENODEV;
2336         }
2337
2338         mutex_lock(&hdcp->mutex);
2339         mutex_lock(&dig_port->hdcp_mutex);
2340         drm_WARN_ON(&dev_priv->drm,
2341                     hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2342         hdcp->content_type = content_type;
2343
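             /*
              * For MST, link-level HDCP runs on the master transcoder and the
              * per-stream transcoder is tracked separately; SST has no separate
              * stream transcoder.
              */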
2344         if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2345                 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2346                 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2347         } else {
2348                 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2349                 hdcp->stream_transcoder = INVALID_TRANSCODER;
2350         }
2351
2352         if (DISPLAY_VER(dev_priv) >= 12)
2353                 dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder);
2354
2355         /*
2356          * HDCP2.2 is more secure than HDCP1.4, so prefer HDCP2.2 whenever
2357          * the setup is capable of it.
2358          */
2359         if (intel_hdcp2_capable(connector)) {
2360                 ret = _intel_hdcp2_enable(connector);
2361                 if (!ret)
2362                         check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
2363         }
2364
2365         /*
2366          * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2367          * be attempted.
2368          */
2369         if (ret && intel_hdcp_capable(connector) &&
2370             hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2371                 ret = _intel_hdcp_enable(connector);
2372         }
2373
2374         if (!ret) {
2375                 schedule_delayed_work(&hdcp->check_work, check_link_interval);
2376                 intel_hdcp_update_value(connector,
2377                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2378                                         true);
2379         }
2380
2381         mutex_unlock(&dig_port->hdcp_mutex);
2382         mutex_unlock(&hdcp->mutex);
2383         return ret;
2384 }
2385
2386 int intel_hdcp_disable(struct intel_connector *connector)
2387 {
2388         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2389         struct intel_hdcp *hdcp = &connector->hdcp;
2390         int ret = 0;
2391
2392         if (!hdcp->shim)
2393                 return -ENOENT;
2394
2395         mutex_lock(&hdcp->mutex);
2396         mutex_lock(&dig_port->hdcp_mutex);
2397
2398         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2399                 goto out;
2400
2401         intel_hdcp_update_value(connector,
2402                                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2403         if (hdcp->hdcp2_encrypted)
2404                 ret = _intel_hdcp2_disable(connector, false);
2405         else if (hdcp->hdcp_encrypted)
2406                 ret = _intel_hdcp_disable(connector);
2407
2408 out:
2409         mutex_unlock(&dig_port->hdcp_mutex);
2410         mutex_unlock(&hdcp->mutex);
2411         cancel_delayed_work_sync(&hdcp->check_work);
2412         return ret;
2413 }
2414
2415 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2416                             struct intel_encoder *encoder,
2417                             const struct intel_crtc_state *crtc_state,
2418                             const struct drm_connector_state *conn_state)
2419 {
2420         struct intel_connector *connector =
2421                                 to_intel_connector(conn_state->connector);
2422         struct intel_hdcp *hdcp = &connector->hdcp;
2423         bool content_protection_type_changed, desired_and_not_enabled = false;
2424
2425         if (!connector->hdcp.shim)
2426                 return;
2427
2428         content_protection_type_changed =
2429                 (conn_state->hdcp_content_type != hdcp->content_type &&
2430                  conn_state->content_protection !=
2431                  DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2432
2433         /*
2434          * If a content type change is requested during an active HDCP
2435          * encryption session, disable HDCP and re-enable it with the new type.
2436          */
2437         if (conn_state->content_protection ==
2438             DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2439             content_protection_type_changed)
2440                 intel_hdcp_disable(connector);
2441
2442         /*
2443          * After HDCP is disabled as part of the type change procedure,
2444          * mark the hdcp state as DESIRED.
2445          */
2446         if (content_protection_type_changed) {
2447                 mutex_lock(&hdcp->mutex);
2448                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2449                 drm_connector_get(&connector->base);
2450                 schedule_work(&hdcp->prop_work);
2451                 mutex_unlock(&hdcp->mutex);
2452         }
2453
2454         if (conn_state->content_protection ==
2455             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2456                 mutex_lock(&hdcp->mutex);
2457                 /* Avoid enabling hdcp if it is already ENABLED */
2458                 desired_and_not_enabled =
2459                         hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2460                 mutex_unlock(&hdcp->mutex);
2461                 /*
2462                  * If HDCP is already ENABLED and the CP property is DESIRED,
2463                  * schedule prop_work to report the correct CP property to userspace.
2464                  */
2465                 if (!desired_and_not_enabled && !content_protection_type_changed) {
2466                         drm_connector_get(&connector->base);
2467                         schedule_work(&hdcp->prop_work);
2468                 }
2469         }
2470
2471         if (desired_and_not_enabled || content_protection_type_changed)
2472                 intel_hdcp_enable(connector,
2473                                   crtc_state,
2474                                   (u8)conn_state->hdcp_content_type);
2475 }
2476
2477 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2478 {
2479         mutex_lock(&dev_priv->display.hdcp.comp_mutex);
2480         if (!dev_priv->display.hdcp.comp_added) {
2481                 mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2482                 return;
2483         }
2484
2485         dev_priv->display.hdcp.comp_added = false;
2486         mutex_unlock(&dev_priv->display.hdcp.comp_mutex);
2487
2488         component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2489 }
2490
2491 void intel_hdcp_cleanup(struct intel_connector *connector)
2492 {
2493         struct intel_hdcp *hdcp = &connector->hdcp;
2494
2495         if (!hdcp->shim)
2496                 return;
2497
2498         /*
2499          * If the connector is registered, it's possible userspace could kick
2500          * off another HDCP enable, which would re-spawn the workers.
2501          */
2502         drm_WARN_ON(connector->base.dev,
2503                 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2504
2505         /*
2506          * Now that the connector is not registered, check_work won't be run,
2507          * but cancel any outstanding instances of it
2508          */
2509         cancel_delayed_work_sync(&hdcp->check_work);
2510
2511         /*
2512          * We don't cancel prop_work in the same way as check_work since it
2513          * requires connection_mutex which could be held while calling this
2514          * function. Instead, we rely on the connector references grabbed before
2515          * scheduling prop_work to ensure the connector is alive when prop_work
2516          * is run. So if we're in the destroy path (which is where this
2517          * function should be called), we're "guaranteed" that prop_work is not
2518          * active (tl;dr This Should Never Happen).
2519          */
2520         drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2521
2522         mutex_lock(&hdcp->mutex);
2523         hdcp->shim = NULL;
2524         mutex_unlock(&hdcp->mutex);
2525 }
2526
2527 void intel_hdcp_atomic_check(struct drm_connector *connector,
2528                              struct drm_connector_state *old_state,
2529                              struct drm_connector_state *new_state)
2530 {
2531         u64 old_cp = old_state->content_protection;
2532         u64 new_cp = new_state->content_protection;
2533         struct drm_crtc_state *crtc_state;
2534
2535         if (!new_state->crtc) {
2536                 /*
2537                  * If the connector is being disabled with CP enabled, mark it
2538                  * desired so it's re-enabled when the connector is brought back
2539                  */
2540                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2541                         new_state->content_protection =
2542                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2543                 return;
2544         }
2545
2546         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2547                                                    new_state->crtc);
2548         /*
2549          * Fix the HDCP uapi content protection state in case of modeset.
2550          * FIXME: As per the HDCP content protection property uapi doc, a uevent()
2551          * needs to be sent if there is a transition from ENABLED->DESIRED.
2552          */
2553         if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2554             (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2555             new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2556                 new_state->content_protection =
2557                         DRM_MODE_CONTENT_PROTECTION_DESIRED;
2558
2559         /*
2560          * Nothing to do if the state didn't change or HDCP was activated since
2561          * the last commit, provided the hdcp content type is also unchanged.
2562          */
2563         if (old_cp == new_cp ||
2564             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2565              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2566                 if (old_state->hdcp_content_type ==
2567                                 new_state->hdcp_content_type)
2568                         return;
2569         }
2570
2571         crtc_state->mode_changed = true;
2572 }
2573
2574 /* Handles the CP_IRQ raised from the DP HDCP sink */
2575 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2576 {
2577         struct intel_hdcp *hdcp = &connector->hdcp;
2578
2579         if (!hdcp->shim)
2580                 return;
2581
2582         atomic_inc(&connector->hdcp.cp_irq_count);
2583         wake_up_all(&connector->hdcp.cp_irq_queue);
2584
2585         schedule_delayed_work(&hdcp->check_work, 0);
2586 }