Merge branch 'asoc-5.3' into asoc-linus
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / intel_csr.c
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/firmware.h>
26
27 #include "i915_drv.h"
28 #include "i915_reg.h"
29 #include "intel_csr.h"
30
/**
 * DOC: csr support for dmc
 *
 * Display Context Save and Restore (CSR) firmware support added from gen9
 * onwards to drive newly added DMC (Display microcontroller) in display
 * engine to save and restore the state of display engine when it enters a
 * low-power state and comes back to normal.
 */

/*
 * Gen12 has no dedicated firmware listed here; reuse the ICL size limit so
 * a firmware supplied via the dmc_firmware_path modparam can still be
 * validated. (Macro expansion is deferred, so referencing
 * ICL_CSR_MAX_FW_SIZE before its definition below is fine.)
 */
#define GEN12_CSR_MAX_FW_SIZE           ICL_CSR_MAX_FW_SIZE

#define ICL_CSR_PATH                    "i915/icl_dmc_ver1_07.bin"
#define ICL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)
#define ICL_CSR_MAX_FW_SIZE             0x6000
MODULE_FIRMWARE(ICL_CSR_PATH);

#define CNL_CSR_PATH                    "i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)
#define CNL_CSR_MAX_FW_SIZE             GLK_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(CNL_CSR_PATH);

#define GLK_CSR_PATH                    "i915/glk_dmc_ver1_04.bin"
#define GLK_CSR_VERSION_REQUIRED        CSR_VERSION(1, 4)
#define GLK_CSR_MAX_FW_SIZE             0x4000
MODULE_FIRMWARE(GLK_CSR_PATH);

#define KBL_CSR_PATH                    "i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 4)
#define KBL_CSR_MAX_FW_SIZE             BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_CSR_PATH);

#define SKL_CSR_PATH                    "i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED        CSR_VERSION(1, 27)
#define SKL_CSR_MAX_FW_SIZE             BXT_CSR_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_CSR_PATH);

#define BXT_CSR_PATH                    "i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED        CSR_VERSION(1, 7)
#define BXT_CSR_MAX_FW_SIZE             0x3000
MODULE_FIRMWARE(BXT_CSR_PATH);

/* Sentinel returned when no FWInfo entry matches the current stepping */
#define CSR_DEFAULT_FW_OFFSET           0xFFFFFFFF

/* Maximum FWInfo table entries for package header versions 1 and 2 */
#define PACKAGE_MAX_FW_INFO_ENTRIES     20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES  32

/* Maximum MMIO write cycles carried by DMC header versions 1 and 3 */
#define DMC_V1_MAX_MMIO_COUNT           8
#define DMC_V3_MAX_MMIO_COUNT           20
77
/*
 * CSS (Code Signing Structure) header: the first structure in the firmware
 * blob. The layout is dictated by the firmware format — keep __packed and
 * do not reorder fields.
 */
struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* always value would be 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;
121
/*
 * One entry of the FWInfo array following the package header; maps a
 * hardware stepping to the location of its DMC binary within the blob.
 */
struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	/* Offset of the DMC binary, in dwords (see parse_csr_fw_package()) */
	u32 offset;
	u32 reserved2;
} __packed;
137
/*
 * Package header, immediately after the CSS header; followed by an array
 * of struct intel_fw_info entries (one per supported stepping).
 */
struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;
150
/*
 * Fields shared by DMC header versions 1 and 3; embedded at the start of
 * both struct intel_dmc_header_v1 and struct intel_dmc_header_v3.
 */
struct intel_dmc_header_base {
	/* always value would be 0x40403E3E */
	u32 signature;

	/* DMC binary header length (bytes for v1, dwords for v3) */
	u8 header_len;

	/* 0x01 */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;
173
/* Version 1 DMC header: base fields plus up to 8 MMIO write cycles. */
struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename  */
	char dfile[32];

	u32 reserved1[2];
} __packed;
191
/* Version 3 DMC header: adds a RAM start address and up to 20 MMIO cycles. */
struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;
212
/* A hardware stepping/sub-stepping pair; '*' acts as a wildcard. */
struct stepping_info {
	char stepping;
	char substepping;
};

/* Per-platform tables mapping INTEL_REVID() to a stepping, indexed by revid */
static const struct stepping_info skl_stepping_info[] = {
	{'A', '0'}, {'B', '0'}, {'C', '0'},
	{'D', '0'}, {'E', '0'}, {'F', '0'},
	{'G', '0'}, {'H', '0'}, {'I', '0'},
	{'J', '0'}, {'K', '0'}
};

static const struct stepping_info bxt_stepping_info[] = {
	{'A', '0'}, {'A', '1'}, {'A', '2'},
	{'B', '0'}, {'B', '1'}, {'B', '2'}
};

static const struct stepping_info icl_stepping_info[] = {
	{'A', '0'}, {'A', '1'}, {'A', '2'},
	{'B', '0'}, {'B', '2'},
	{'C', '0'}
};

/* Wildcard fallback for unknown platforms or out-of-table revision ids */
static const struct stepping_info no_stepping_info = { '*', '*' };
237
238 static const struct stepping_info *
239 intel_get_stepping_info(struct drm_i915_private *dev_priv)
240 {
241         const struct stepping_info *si;
242         unsigned int size;
243
244         if (IS_ICELAKE(dev_priv)) {
245                 size = ARRAY_SIZE(icl_stepping_info);
246                 si = icl_stepping_info;
247         } else if (IS_SKYLAKE(dev_priv)) {
248                 size = ARRAY_SIZE(skl_stepping_info);
249                 si = skl_stepping_info;
250         } else if (IS_BROXTON(dev_priv)) {
251                 size = ARRAY_SIZE(bxt_stepping_info);
252                 si = bxt_stepping_info;
253         } else {
254                 size = 0;
255                 si = NULL;
256         }
257
258         if (INTEL_REVID(dev_priv) < size)
259                 return si + INTEL_REVID(dev_priv);
260
261         return &no_stepping_info;
262 }
263
264 static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
265 {
266         u32 val, mask;
267
268         mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
269
270         if (IS_GEN9_LP(dev_priv))
271                 mask |= DC_STATE_DEBUG_MASK_CORES;
272
273         /* The below bit doesn't need to be cleared ever afterwards */
274         val = I915_READ(DC_STATE_DEBUG);
275         if ((val & mask) != mask) {
276                 val |= mask;
277                 I915_WRITE(DC_STATE_DEBUG, val);
278                 POSTING_READ(DC_STATE_DEBUG);
279         }
280 }
281
/**
 * intel_csr_load_program() - write the firmware from memory to register.
 * @dev_priv: i915 drm device.
 *
 * CSR firmware is read from a .bin file and kept in internal memory one time.
 * Every time display comes back from low power state this function is called
 * to copy the firmware from internal memory to registers.
 */
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
	u32 *payload = dev_priv->csr.dmc_payload;
	u32 i, fw_size;

	if (!HAS_CSR(dev_priv)) {
		DRM_ERROR("No CSR support available for this platform\n");
		return;
	}

	if (!dev_priv->csr.dmc_payload) {
		DRM_ERROR("Tried to program CSR with empty payload\n");
		return;
	}

	/* dmc_fw_size is in dwords, matching the per-dword copy loop below */
	fw_size = dev_priv->csr.dmc_fw_size;
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	/*
	 * NOTE(review): preemption is disabled around the payload copy,
	 * presumably to keep the register write burst uninterrupted —
	 * confirm against the original commit before relying on this.
	 */
	preempt_disable();

	for (i = 0; i < fw_size; i++)
		I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);

	preempt_enable();

	/* Replay the MMIO writes requested by the firmware header */
	for (i = 0; i < dev_priv->csr.mmio_count; i++) {
		I915_WRITE(dev_priv->csr.mmioaddr[i],
			   dev_priv->csr.mmiodata[i]);
	}

	dev_priv->csr.dc_state = 0;

	gen9_set_dc_state_debugmask(dev_priv);
}
324
325 /*
326  * Search fw_info table for dmc_offset to find firmware binary: num_entries is
327  * already sanitized.
328  */
329 static u32 find_dmc_fw_offset(const struct intel_fw_info *fw_info,
330                               unsigned int num_entries,
331                               const struct stepping_info *si,
332                               u8 package_ver)
333 {
334         u32 dmc_offset = CSR_DEFAULT_FW_OFFSET;
335         unsigned int i;
336
337         for (i = 0; i < num_entries; i++) {
338                 if (package_ver > 1 && fw_info[i].dmc_id != 0)
339                         continue;
340
341                 if (fw_info[i].substepping == '*' &&
342                     si->stepping == fw_info[i].stepping) {
343                         dmc_offset = fw_info[i].offset;
344                         break;
345                 }
346
347                 if (si->stepping == fw_info[i].stepping &&
348                     si->substepping == fw_info[i].substepping) {
349                         dmc_offset = fw_info[i].offset;
350                         break;
351                 }
352
353                 if (fw_info[i].stepping == '*' &&
354                     fw_info[i].substepping == '*') {
355                         /*
356                          * In theory we should stop the search as generic
357                          * entries should always come after the more specific
358                          * ones, but let's continue to make sure to work even
359                          * with "broken" firmwares. If we don't find a more
360                          * specific one, then we use this entry
361                          */
362                         dmc_offset = fw_info[i].offset;
363                 }
364         }
365
366         return dmc_offset;
367 }
368
/*
 * Parse one DMC header (v1 or v3) plus its payload into @csr.
 *
 * Returns the number of bytes consumed from the blob, or 0 on any
 * validation failure: unknown header version, wrong header/mmio lengths,
 * out-of-range MMIO addresses, truncated or oversized payload, or
 * allocation failure.
 */
static u32 parse_csr_fw_dmc(struct intel_csr *csr,
			    const struct intel_dmc_header_base *dmc_header,
			    size_t rem_size)
{
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max;
	u8 *payload;

	BUILD_BUG_ON(ARRAY_SIZE(csr->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(csr->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access common fields, we will check again below
	 * after we have read the version
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		/* unlike v3, the v1 header_len field is already in bytes */
		header_len_bytes = dmc_header->header_len;
		dmc_header_size = sizeof(*v1);
	} else {
		DRM_ERROR("Unknown DMC fw header version: %u\n",
			  dmc_header->header_ver);
		return 0;
	}

	if (header_len_bytes != dmc_header_size) {
		DRM_ERROR("DMC firmware has wrong dmc header length "
			  "(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		DRM_ERROR("DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < CSR_MMIO_START_RANGE ||
		    mmioaddr[i] > CSR_MMIO_END_RANGE) {
			DRM_ERROR("DMC firmware has wrong mmio address 0x%x\n",
				  mmioaddr[i]);
			return 0;
		}
		csr->mmioaddr[i] = _MMIO(mmioaddr[i]);
		csr->mmiodata[i] = mmiodata[i];
	}
	csr->mmio_count = mmio_count;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > csr->max_fw_size) {
		DRM_ERROR("DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	csr->dmc_fw_size = dmc_header->fw_size;

	csr->dmc_payload = kmalloc(payload_size, GFP_KERNEL);
	if (!csr->dmc_payload) {
		DRM_ERROR("Memory allocation failed for dmc payload\n");
		return 0;
	}

	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(csr->dmc_payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	DRM_ERROR("Truncated DMC firmware, refusing.\n");
	return 0;
}
474
/*
 * Parse the package header that follows the CSS header and locate the DMC
 * binary matching stepping @si via the FWInfo table.
 *
 * Returns the offset in bytes from @package_header to that DMC header, or
 * 0 on validation failure (truncated blob, unknown header version, wrong
 * header length, or no entry for this stepping).
 */
static u32
parse_csr_fw_package(struct intel_csr *csr,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries, dmc_offset;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	/* The header version determines how many FWInfo slots follow */
	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		DRM_ERROR("DMC firmware has unknown header version %u\n",
			  package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	if (package_header->header_len * 4 != package_size) {
		DRM_ERROR("DMC firmware has wrong package header length "
			  "(%u bytes)\n", package_size);
		return 0;
	}

	/* Clamp a bogus entry count rather than reading past the table */
	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_offset = find_dmc_fw_offset(fw_info, num_entries, si,
					package_header->header_ver);
	if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
		DRM_ERROR("DMC firmware not supported for %c stepping\n",
			  si->stepping);
		return 0;
	}

	/* dmc_offset is in dwords */
	return package_size + dmc_offset * 4;

error_truncated:
	DRM_ERROR("Truncated DMC firmware, refusing.\n");
	return 0;
}
533
534 /* Return number of bytes parsed or 0 on error */
535 static u32 parse_csr_fw_css(struct intel_csr *csr,
536                             struct intel_css_header *css_header,
537                             size_t rem_size)
538 {
539         if (rem_size < sizeof(struct intel_css_header)) {
540                 DRM_ERROR("Truncated DMC firmware, refusing.\n");
541                 return 0;
542         }
543
544         if (sizeof(struct intel_css_header) !=
545             (css_header->header_len * 4)) {
546                 DRM_ERROR("DMC firmware has wrong CSS header length "
547                           "(%u bytes)\n",
548                           (css_header->header_len * 4));
549                 return 0;
550         }
551
552         if (csr->required_version &&
553             css_header->version != csr->required_version) {
554                 DRM_INFO("Refusing to load DMC firmware v%u.%u,"
555                          " please use v%u.%u\n",
556                          CSR_VERSION_MAJOR(css_header->version),
557                          CSR_VERSION_MINOR(css_header->version),
558                          CSR_VERSION_MAJOR(csr->required_version),
559                          CSR_VERSION_MINOR(csr->required_version));
560                 return 0;
561         }
562
563         csr->version = css_header->version;
564
565         return sizeof(struct intel_css_header);
566 }
567
568 static void parse_csr_fw(struct drm_i915_private *dev_priv,
569                          const struct firmware *fw)
570 {
571         struct intel_css_header *css_header;
572         struct intel_package_header *package_header;
573         struct intel_dmc_header_base *dmc_header;
574         struct intel_csr *csr = &dev_priv->csr;
575         const struct stepping_info *si = intel_get_stepping_info(dev_priv);
576         u32 readcount = 0;
577         u32 r;
578
579         if (!fw)
580                 return;
581
582         /* Extract CSS Header information */
583         css_header = (struct intel_css_header *)fw->data;
584         r = parse_csr_fw_css(csr, css_header, fw->size);
585         if (!r)
586                 return;
587
588         readcount += r;
589
590         /* Extract Package Header information */
591         package_header = (struct intel_package_header *)&fw->data[readcount];
592         r = parse_csr_fw_package(csr, package_header, si, fw->size - readcount);
593         if (!r)
594                 return;
595
596         readcount += r;
597
598         /* Extract dmc_header information */
599         dmc_header = (struct intel_dmc_header_base *)&fw->data[readcount];
600         parse_csr_fw_dmc(csr, dmc_header, fw->size - readcount);
601 }
602
603 static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
604 {
605         WARN_ON(dev_priv->csr.wakeref);
606         dev_priv->csr.wakeref =
607                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
608 }
609
610 static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
611 {
612         intel_wakeref_t wakeref __maybe_unused =
613                 fetch_and_zero(&dev_priv->csr.wakeref);
614
615         intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
616 }
617
618 static void csr_load_work_fn(struct work_struct *work)
619 {
620         struct drm_i915_private *dev_priv;
621         struct intel_csr *csr;
622         const struct firmware *fw = NULL;
623
624         dev_priv = container_of(work, typeof(*dev_priv), csr.work);
625         csr = &dev_priv->csr;
626
627         request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
628         parse_csr_fw(dev_priv, fw);
629
630         if (dev_priv->csr.dmc_payload) {
631                 intel_csr_load_program(dev_priv);
632                 intel_csr_runtime_pm_put(dev_priv);
633
634                 DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
635                          dev_priv->csr.fw_path,
636                          CSR_VERSION_MAJOR(csr->version),
637                          CSR_VERSION_MINOR(csr->version));
638         } else {
639                 dev_notice(dev_priv->drm.dev,
640                            "Failed to load DMC firmware %s."
641                            " Disabling runtime power management.\n",
642                            csr->fw_path);
643                 dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
644                            INTEL_UC_FIRMWARE_URL);
645         }
646
647         release_firmware(fw);
648 }
649
/**
 * intel_csr_ucode_init() - initialize the firmware loading.
 * @dev_priv: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copied into internal memory.
 */
void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{
	struct intel_csr *csr = &dev_priv->csr;

	INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);

	if (!HAS_CSR(dev_priv))
		return;

	/*
	 * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working CSR for whatever
	 * reason.
	 */
	intel_csr_runtime_pm_get(dev_priv);

	/*
	 * Newest platform first; each branch picks the firmware path, the
	 * exact version we insist on, and the size bound checked in
	 * parse_csr_fw_dmc().
	 */
	if (INTEL_GEN(dev_priv) >= 12) {
		/*
		 * Allow to load fw via parameter using the last known size.
		 * No fw_path is set here, so unless dmc_firmware_path is
		 * given we take the "no known firmware" exit below.
		 */
		csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
	} else if (IS_GEN(dev_priv, 11)) {
		csr->fw_path = ICL_CSR_PATH;
		csr->required_version = ICL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
	} else if (IS_CANNONLAKE(dev_priv)) {
		csr->fw_path = CNL_CSR_PATH;
		csr->required_version = CNL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(dev_priv)) {
		csr->fw_path = GLK_CSR_PATH;
		csr->required_version = GLK_CSR_VERSION_REQUIRED;
		csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
		csr->fw_path = KBL_CSR_PATH;
		csr->required_version = KBL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(dev_priv)) {
		csr->fw_path = SKL_CSR_PATH;
		csr->required_version = SKL_CSR_VERSION_REQUIRED;
		csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
	} else if (IS_BROXTON(dev_priv)) {
		csr->fw_path = BXT_CSR_PATH;
		csr->required_version = BXT_CSR_VERSION_REQUIRED;
		csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
	}

	/* Module parameter override: an empty string disables CSR loading */
	if (i915_modparams.dmc_firmware_path) {
		if (strlen(i915_modparams.dmc_firmware_path) == 0) {
			csr->fw_path = NULL;
			DRM_INFO("Disabling CSR firmware and runtime PM\n");
			return;
		}

		csr->fw_path = i915_modparams.dmc_firmware_path;
		/* Bypass version check for firmware override. */
		csr->required_version = 0;
	}

	/* Early returns above keep the rpm wakeref held on purpose */
	if (csr->fw_path == NULL) {
		DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
		return;
	}

	DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
	schedule_work(&dev_priv->csr.work);
}
725
726 /**
727  * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
728  * @dev_priv: i915 drm device
729  *
730  * Prepare the DMC firmware before entering system suspend. This includes
731  * flushing pending work items and releasing any resources acquired during
732  * init.
733  */
734 void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
735 {
736         if (!HAS_CSR(dev_priv))
737                 return;
738
739         flush_work(&dev_priv->csr.work);
740
741         /* Drop the reference held in case DMC isn't loaded. */
742         if (!dev_priv->csr.dmc_payload)
743                 intel_csr_runtime_pm_put(dev_priv);
744 }
745
746 /**
747  * intel_csr_ucode_resume() - init CSR firmware during system resume
748  * @dev_priv: i915 drm device
749  *
750  * Reinitialize the DMC firmware during system resume, reacquiring any
751  * resources released in intel_csr_ucode_suspend().
752  */
753 void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
754 {
755         if (!HAS_CSR(dev_priv))
756                 return;
757
758         /*
759          * Reacquire the reference to keep RPM disabled in case DMC isn't
760          * loaded.
761          */
762         if (!dev_priv->csr.dmc_payload)
763                 intel_csr_runtime_pm_get(dev_priv);
764 }
765
766 /**
767  * intel_csr_ucode_fini() - unload the CSR firmware.
768  * @dev_priv: i915 drm device.
769  *
770  * Firmmware unloading includes freeing the internal memory and reset the
771  * firmware loading status.
772  */
773 void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
774 {
775         if (!HAS_CSR(dev_priv))
776                 return;
777
778         intel_csr_ucode_suspend(dev_priv);
779         WARN_ON(dev_priv->csr.wakeref);
780
781         kfree(dev_priv->csr.dmc_payload);
782 }