drm/msm/dpu: add support for MDP_TOP blackhole
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

#ifdef CONFIG_DEBUG_FS
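/*
 * "Danger" and "safe" are QoS signals reported by the MDP: loosely, a
 * pipe asserts danger when its latency buffer is at risk of underflow
 * and safe once the fill level is back above the safe watermark. The
 * debugfs files below dump the per-SSPP status of each signal.
 */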
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = s->private;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP     :  0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d   :  0x%x\n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);

static ssize_t _dpu_plane_danger_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
				plane->base.id, plane->fb->width,
				plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
				plane->state->src_x >> 16,
				plane->state->src_y >> 16,
				plane->state->src_w >> 16,
				plane->state->src_h >> 16,
				plane->state->crtc_x, plane->state->crtc_y,
				plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}

static ssize_t _dpu_plane_danger_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	unsigned int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}

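/*
 * Writing a non-zero value to "disable_danger" turns the danger signal
 * off for all active planes; writing zero turns it back on. Reads
 * report 1 while danger control is disabled.
 */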
static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}

/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};

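/*
 * The dump below prints four 32-bit words per row, each row prefixed
 * with its byte address, e.g. (values illustrative):
 *
 *	[1000] 00000000 00000000 00000000 00000000
 */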
static int dpu_regset32_show(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_regset32);

void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_regset32_fops);
}
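
/*
 * Illustrative call from a hardware block's debugfs init (names here
 * are hypothetical, not taken from an actual call site):
 *
 *	dpu_debugfs_create_regset32("regs", 0400, debugfs_root,
 *				    cfg->base, cfg->len, dpu_kms);
 */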

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!p)
		return -EINVAL;

	/* Only create debugfs files for the primary node; skip render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	dev = dpu_kms->dev;
	priv = dev->dev_private;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (priv->dp[i])
			msm_dp_debugfs_init(priv->dp[i], minor);
	}

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, then duplicates
 * the private object state for this atomic state (if it has not been
 * duplicated already) and returns it.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
						&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}
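
/*
 * Sketch of a typical caller in an atomic check path (illustrative
 * only; the surrounding function and error handling are assumed):
 *
 *	global_state = dpu_kms_get_global_state(crtc_state->state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 *	... reserve/release HW blocks tracked in global_state here ...
 */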

static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}

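/*
 * Interconnect voting is optional: with no "mdp0-mem" path in DT no
 * bandwidth votes are cast at all, and "mdp1-mem" only exists on SoCs
 * with a second DPU-to-memory port.
 */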
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct drm_device *dev = dpu_kms->dev;
	struct device *dpu_dev = dev->dev;

	path0 = msm_icc_get(dpu_dev, "mdp0-mem");
	path1 = msm_icc_get(dpu_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}
	return 0;
}

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

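/*
 * Ask each encoder driving the CRTC for the time of the next vsync and
 * use the first answer available; if none of them can provide one,
 * fall back to "now" so that commit pacing still makes progress.
 */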
static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		ktime_t vsync_time;

		if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
			return vsync_time;
	}

	return ktime_get();
}

static void dpu_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	if (!kms)
		return;

	/* Call prepare_commit for all affected encoders */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask) {
			dpu_encoder_prepare_commit(encoder);
		}
	}
}

static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support the following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		info.intf_type = encoder->encoder_type;

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);

		info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}

			info.h_tile_instance[info.num_of_h_tiles++] = other;
		}

		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc)
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
	}

	return rc;
}

static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					    struct msm_drm_private *priv,
					    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for DP display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			drm_encoder_cleanup(encoder);
			return rc;
		}

		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.intf_type = encoder->encoder_type;
		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc) {
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
			return rc;
		}
	}

	return 0;
}

static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	memset(&info, 0, sizeof(info));

	rc = dpu_writeback_init(dev, encoder, wb_formats,
			n_formats);
	if (rc) {
		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = WB_2;
	info.intf_type = encoder->encoder_type;

	rc = dpu_encoder_setup(dev, encoder, &info);
	if (rc) {
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
		return rc;
	}

	return 0;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev:        Pointer to drm device structure
 * @priv:       Pointer to private drm device data
 * @dpu_kms:    Pointer to dpu kms structure
 * Returns:     Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	/* Since WB isn't a separate driver, check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}

#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	const struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoders and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}

static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
			if (dpu_kms->hw_vbif[i]) {
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[i]);
				dpu_kms->hw_vbif[i] = NULL;
			}
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
		msm_dp_irq_postinstall(priv->dp[i]);

	return 0;
}
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	const struct dpu_mdss_cfg *cat;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
				dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
				dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
				dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
				dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
				dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
				dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
				dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);

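	/*
	 * On chipsets with DPU_MDP_PERIPH_0_REMOVED the PERIPH_0 range
	 * inside MDP TOP is a hole no longer backed by registers (the
	 * "blackhole" this change handles), so dump TOP as two
	 * sub-ranges that skip over it instead of one contiguous block.
	 */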
	if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
		msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
				dpu_kms->mmio + cat->mdp[0].base, "top");
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
				dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
	} else {
		msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
				dpu_kms->mmio + cat->mdp[0].base, "top");
	}

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_core_irq_uninstall,
	.irq             = dpu_core_irq,
	.enable_commit   = dpu_kms_enable_commit,
	.disable_commit  = dpu_kms_disable_commit,
	.vsync_time      = dpu_kms_vsync_time,
	.prepare_commit  = dpu_kms_prepare_commit,
	.flush_commit    = dpu_kms_flush_commit,
	.wait_flush      = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.destroy         = dpu_kms_destroy,
	.snapshot        = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};

static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct msm_gem_address_space *aspace;

	aspace = msm_kms_init_aspace(dpu_kms->dev);
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	dpu_kms->base.aspace = aspace;

	return 0;
}

u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk);
}

static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined\n");
	}

	dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
	if (IS_ERR(dpu_kms->reg_dma)) {
		dpu_kms->reg_dma = NULL;
		DPU_DEBUG("REG_DMA is not defined\n");
	}

	dpu_kms_parse_data_bus_icc_path(dpu_kms);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
		rc = PTR_ERR(dpu_kms->catalog);
		if (!dpu_kms->catalog)
			rc = -EINVAL;
		DPU_ERROR("catalog init failed: %d\n", rc);
		dpu_kms->catalog = NULL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
		if (IS_ERR(dpu_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			dpu_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height
	 * is 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms;
	int irq;
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->num_clocks = ret;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;

	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (!irq) {
		DPU_ERROR("failed to get irq\n");
		return -EINVAL;
	}
	dpu_kms->base.irq = irq;

	return 0;
}

static int dpu_dev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, dpu_kms_init);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

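/*
 * Runtime PM: suspend drops the OPP/clock vote and zeroes the
 * interconnect bandwidth votes; resume re-enables the clocks,
 * reprograms the VBIF memory types (presumably lost across power
 * collapse) and gives each encoder a chance to restore its state.
 */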
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;

	ddev = dpu_kms->dev;

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8998-dpu", },
	{ .compatible = "qcom,qcm2290-dpu", },
	{ .compatible = "qcom,sdm845-dpu", },
	{ .compatible = "qcom,sc7180-dpu", },
	{ .compatible = "qcom,sc7280-dpu", },
	{ .compatible = "qcom,sc8180x-dpu", },
	{ .compatible = "qcom,sm6115-dpu", },
	{ .compatible = "qcom,sm8150-dpu", },
	{ .compatible = "qcom,sm8250-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}