// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"
#include "disp/msm_disp_snapshot.h"

#include "dpu_core_irq.h"
#include "dpu_crtc.h"
#include "dpu_encoder.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_plane.h"
#include "dpu_vbif.h"
#include "dpu_writeback.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

#define MIN_IB_BW	400000000ULL /* Min ib vote 400 MB/s */

static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);

#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = (struct dpu_kms *)s->private;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP     :  0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d   :  0x%x\n", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);

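/*
 * The "danger" debugfs directory (created in dpu_debugfs_danger_init() below)
 * exposes three files:
 *
 *   danger_status  - dump the MDP/SSPP danger signal status
 *   safe_status    - dump the MDP/SSPP safe signal status
 *   disable_danger - danger signal control: reading returns 1 when the
 *                    danger signal is currently disabled; writing a non-zero
 *                    value disables it for all active planes, writing 0
 *                    re-enables it
 */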
static ssize_t _dpu_plane_danger_read(struct file *file,
			char __user *buff, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	int len;
	char buf[40];

	len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);

	return simple_read_from_buffer(buff, count, ppos, buf, len);
}

static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, kms->dev) {
		if (plane->fb && plane->state) {
			dpu_plane_danger_signal_ctrl(plane, enable);
			DPU_DEBUG("plane:%d img:%dx%d ",
				plane->base.id, plane->fb->width,
				plane->fb->height);
			DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
				plane->state->src_x >> 16,
				plane->state->src_y >> 16,
				plane->state->src_w >> 16,
				plane->state->src_h >> 16,
				plane->state->crtc_x, plane->state->crtc_y,
				plane->state->crtc_w, plane->state->crtc_h);
		} else {
			DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
		}
	}
}

static ssize_t _dpu_plane_danger_write(struct file *file,
		    const char __user *user_buf, size_t count, loff_t *ppos)
{
	struct dpu_kms *kms = file->private_data;
	unsigned int disable_panic;
	int ret;

	ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
	if (ret)
		return ret;

	if (disable_panic) {
		/* Disable panic signal for all active pipes */
		DPU_DEBUG("Disabling danger:\n");
		_dpu_plane_set_danger_state(kms, false);
		kms->has_danger_ctrl = false;
	} else {
		/* Enable panic signal for all active pipes */
		DPU_DEBUG("Enabling danger:\n");
		kms->has_danger_ctrl = true;
		_dpu_plane_set_danger_state(kms, true);
	}

	return count;
}

static const struct file_operations dpu_plane_danger_enable = {
	.open = simple_open,
	.read = _dpu_plane_danger_read,
	.write = _dpu_plane_danger_write,
};

static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
	debugfs_create_file("disable_danger", 0600, entry,
			dpu_kms, &dpu_plane_danger_enable);
}

/*
 * Companion structure for dpu_debugfs_create_regset32.
 */
struct dpu_debugfs_regset32 {
	uint32_t offset;
	uint32_t blk_len;
	struct dpu_kms *dpu_kms;
};

static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}
	seq_puts(s, "\n");
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;
}

static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations dpu_fops_regset32 = {
	.open =		dpu_debugfs_open_regset32,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};

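/**
 * dpu_debugfs_create_regset32 - create a debugfs file that dumps a block of
 *                               32-bit registers in hex, 16 bytes per line
 * @name:    name of the debugfs file
 * @mode:    file permissions
 * @parent:  parent debugfs dentry
 * @offset:  offset of the register block within the mdp register region;
 *           rounded down to a multiple of 4
 * @length:  length of the register block, in bytes
 * @dpu_kms: handle used to look up the mmio base and for runtime PM
 */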
void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	struct dpu_debugfs_regset32 *regset;

	if (WARN_ON(!name || !dpu_kms || !length))
		return;

	regset = devm_kzalloc(&dpu_kms->pdev->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(offset, 4);
	regset->blk_len = length;
	regset->dpu_kms = dpu_kms;

	debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}

static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	int i;

	if (!p)
		return -EINVAL;

	/* Only create a set of debugfs for the primary node, ignore render nodes */
	if (minor->type != DRM_MINOR_PRIMARY)
		return 0;

	dev = dpu_kms->dev;
	priv = dev->dev_private;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);
	dpu_debugfs_sspp_init(dpu_kms, entry);

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (priv->dp[i])
			msm_dp_debugfs_init(priv->dp[i], minor);
	}

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif

/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state and creates
 * a duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
						&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}

static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}

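/*
 * Look up the "mdp0-mem" and "mdp1-mem" interconnect paths on the parent
 * MDSS device. A NULL "mdp0-mem" path (no interconnects described) is not
 * treated as an error, but a real error from of_icc_get() is propagated.
 * "mdp1-mem" is optional and only recorded when present.
 */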
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct drm_device *dev = dpu_kms->dev;
	struct device *dpu_dev = dev->dev;
	struct device *mdss_dev = dpu_dev->parent;

	/*
	 * Interconnects are a part of MDSS device tree binding, not the
	 * MDP/DPU device.
	 */
	path0 = of_icc_get(mdss_dev, "mdp0-mem");
	path1 = of_icc_get(mdss_dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}
	return 0;
}

static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

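/*
 * Return the time of the next expected vsync for the crtc, as reported by
 * the first of its encoders that can provide one; fall back to the current
 * time when no encoder can.
 */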
static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		ktime_t vsync_time;

		if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
			return vsync_time;
	}

	return ktime_get();
}

static void dpu_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	if (!kms)
		return;

	/* Call prepare_commit for all affected encoders */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask) {
			dpu_encoder_prepare_commit(encoder);
		}
	}
}

static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}

static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}

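/*
 * Block until the previously kicked-off commit has been latched by the
 * hardware for the given crtc, by waiting for MSM_ENC_COMMIT_DONE on each
 * encoder attached to it. Disabled and inactive crtcs are skipped.
 */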
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}

static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}

static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/*
	 * We support the following configurations:
	 * - Single DSI host (dsi0 or dsi1)
	 * - Two independent DSI hosts
	 * - Bonded DSI0 and DSI1 hosts
	 *
	 * TODO: Support swapping DSI0 and DSI1 in the bonded setup.
	 */
	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		int other = (i + 1) % 2;

		if (!priv->dsi[i])
			continue;

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) &&
		    !msm_dsi_is_master_dsi(priv->dsi[i]))
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dsi display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		info.intf_type = encoder->encoder_type;

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}

		info.h_tile_instance[info.num_of_h_tiles++] = i;
		info.capabilities = msm_dsi_is_cmd_mode(priv->dsi[i]) ?
			MSM_DISPLAY_CAP_CMD_MODE :
			MSM_DISPLAY_CAP_VID_MODE;

		info.dsc = msm_dsi_get_dsc_config(priv->dsi[i]);

		if (msm_dsi_is_bonded_dsi(priv->dsi[i]) && priv->dsi[other]) {
			rc = msm_dsi_modeset_init(priv->dsi[other], dev, encoder);
			if (rc) {
				DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
					other, rc);
				break;
			}

			info.h_tile_instance[info.num_of_h_tiles++] = other;
		}

		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc)
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
	}

	return rc;
}

static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					    struct msm_drm_private *priv,
					    struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
		if (!priv->dp[i])
			continue;

		encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
		if (IS_ERR(encoder)) {
			DPU_ERROR("encoder init failed for dp display\n");
			return PTR_ERR(encoder);
		}

		memset(&info, 0, sizeof(info));
		rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
			drm_encoder_cleanup(encoder);
			return rc;
		}

		info.num_of_h_tiles = 1;
		info.h_tile_instance[0] = i;
		info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
		info.intf_type = encoder->encoder_type;
		rc = dpu_encoder_setup(dev, encoder, &info);
		if (rc) {
			DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
			return rc;
		}
	}

	return 0;
}

static int _dpu_kms_initialize_writeback(struct drm_device *dev,
		struct msm_drm_private *priv, struct dpu_kms *dpu_kms,
		const u32 *wb_formats, int n_formats)
{
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	int rc;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_VIRTUAL);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for writeback display\n");
		return PTR_ERR(encoder);
	}

	memset(&info, 0, sizeof(info));

	rc = dpu_writeback_init(dev, encoder, wb_formats,
			n_formats);
	if (rc) {
		DPU_ERROR("dpu_writeback_init failed, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	info.num_of_h_tiles = 1;
	/* use only WB idx 2 instance for DPU */
	info.h_tile_instance[0] = WB_2;
	info.intf_type = encoder->encoder_type;

	rc = dpu_encoder_setup(dev, encoder, &info);
	if (rc) {
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
				  encoder->base.id, rc);
		return rc;
	}

	return 0;
}

/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev:        Pointer to drm device structure
 * @priv:       Pointer to private drm device data
 * @dpu_kms:    Pointer to dpu kms structure
 * Returns:     Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				    struct msm_drm_private *priv,
				    struct dpu_kms *dpu_kms)
{
	int rc = 0;
	int i;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	/* Since WB isn't a separate driver, check the catalog before initializing */
	if (dpu_kms->catalog->wb_count) {
		for (i = 0; i < dpu_kms->catalog->wb_count; i++) {
			if (dpu_kms->catalog->wb[i].id == WB_2) {
				rc = _dpu_kms_initialize_writeback(dev, priv, dpu_kms,
						dpu_kms->catalog->wb[i].format_list,
						dpu_kms->catalog->wb[i].num_formats);
				if (rc) {
					DPU_ERROR("initialize_WB failed, rc = %d\n", rc);
					return rc;
				}
			}
		}
	}

	return rc;
}

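/*
 * Create the DRM mode objects from the hardware catalog: one plane per
 * SSPP (cursor-capable SSPPs become cursor planes and other SSPPs become
 * primary planes until each prospective crtc has one of each; the
 * remaining SSPPs become overlays), then one crtc per primary/cursor
 * plane pair, and finally mark every encoder as compatible with every
 * crtc.
 */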
#define MAX_PLANES 20
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	unsigned int num_encoders;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		return ret;

	num_encoders = 0;
	drm_for_each_encoder(encoder, dev)
		num_encoders++;

	max_crtc_count = min(catalog->mixer_count, num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			return ret;
		}

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			return ret;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	drm_for_each_encoder(encoder, dev)
		encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
}

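/*
 * Tear down everything set up by dpu_kms_hw_init(). Each pointer is checked
 * before being destroyed and cleared afterwards, so this is safe to call on
 * a partially initialized dpu_kms and more than once during shutdown.
 */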
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx]) {
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
				dpu_kms->hw_vbif[vbif_idx] = NULL;
			}
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}

static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&dpu_kms->pdev->dev);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
		msm_dp_irq_postinstall(priv->dp[i]);

	return 0;
}

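/*
 * Capture the register state of every hardware block listed in the catalog
 * (CTL, DSPP, INTF, PINGPONG, SSPP, LM and WB sub-blocks plus the TOP block)
 * into the coredump-style display snapshot.
 */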
static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_kms *kms)
{
	int i;
	struct dpu_kms *dpu_kms;
	struct dpu_mdss_cfg *cat;
	struct dpu_hw_mdp *top;

	dpu_kms = to_dpu_kms(kms);

	cat = dpu_kms->catalog;
	top = dpu_kms->hw_mdp;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* dump CTL sub-blocks HW regs info */
	for (i = 0; i < cat->ctl_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
				dpu_kms->mmio + cat->ctl[i].base, "ctl_%d", i);

	/* dump DSPP sub-blocks HW regs info */
	for (i = 0; i < cat->dspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len,
				dpu_kms->mmio + cat->dspp[i].base, "dspp_%d", i);

	/* dump INTF sub-blocks HW regs info */
	for (i = 0; i < cat->intf_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
				dpu_kms->mmio + cat->intf[i].base, "intf_%d", i);

	/* dump PP sub-blocks HW regs info */
	for (i = 0; i < cat->pingpong_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len,
				dpu_kms->mmio + cat->pingpong[i].base, "pingpong_%d", i);

	/* dump SSPP sub-blocks HW regs info */
	for (i = 0; i < cat->sspp_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len,
				dpu_kms->mmio + cat->sspp[i].base, "sspp_%d", i);

	/* dump LM sub-blocks HW regs info */
	for (i = 0; i < cat->mixer_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
				dpu_kms->mmio + cat->mixer[i].base, "lm_%d", i);

	/* dump WB sub-blocks HW regs info */
	for (i = 0; i < cat->wb_count; i++)
		msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
				dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);

	msm_disp_snapshot_add_block(disp_state, top->hw.length,
			dpu_kms->mmio + top->hw.blk_off, "top");

	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_core_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_core_irq_uninstall,
	.irq             = dpu_core_irq,
	.enable_commit   = dpu_kms_enable_commit,
	.disable_commit  = dpu_kms_disable_commit,
	.vsync_time      = dpu_kms_vsync_time,
	.prepare_commit  = dpu_kms_prepare_commit,
	.flush_commit    = dpu_kms_flush_commit,
	.wait_flush      = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.destroy         = dpu_kms_destroy,
	.snapshot        = dpu_kms_mdp_snapshot,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};

static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}

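/*
 * Set up the GEM address space backing the display IOMMU, spanning 0x1000
 * up to 4 GiB. Running without an IOMMU is a supported configuration: when
 * no domain can be allocated this returns 0 and the kms simply has no
 * aspace.
 */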
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;
	struct device *dpu_dev = dpu_kms->dev->dev;
	struct device *mdss_dev = dpu_dev->parent;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return 0;

	/*
	 * IOMMUs are a part of MDSS device tree binding, not the
	 * MDP/DPU device.
	 */
	mmu = msm_iommu_new(mdss_dev, domain);
	if (IS_ERR(mmu)) {
		iommu_domain_free(domain);
		return PTR_ERR(mmu);
	}
	aspace = msm_gem_address_space_create(mmu, "dpu1",
		0x1000, 0x100000000 - 0x1000);

	if (IS_ERR(aspace)) {
		mmu->funcs->destroy(mmu);
		return PTR_ERR(aspace);
	}

	dpu_kms->base.aspace = aspace;
	return 0;
}

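/*
 * Return the current rate of the named clock from the kms clock bulk data.
 * Note that when the clock is not found, -EINVAL is returned cast to u64,
 * so callers see a very large value rather than 0.
 */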
u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct clk *clk;

	clk = msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk);
}

static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined\n");
	}

	dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma");
	if (IS_ERR(dpu_kms->reg_dma)) {
		dpu_kms->reg_dma = NULL;
		DPU_DEBUG("REG_DMA is not defined\n");
	}

	dpu_kms_parse_data_bus_icc_path(dpu_kms);

	rc = pm_runtime_resume_and_get(&dpu_kms->pdev->dev);
	if (rc < 0)
		goto error;

	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
		rc = PTR_ERR(dpu_kms->catalog);
		if (!dpu_kms->catalog)
			rc = -EINVAL;
		DPU_ERROR("catalog init failed: %d\n", rc);
		dpu_kms->catalog = NULL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

		dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
			if (!dpu_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			dpu_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			msm_clk_bulk_get_clock(dpu_kms->clocks, dpu_kms->num_clocks, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max crtc
	 * height is 4096
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}

static int dpu_kms_init(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	struct device *dev = ddev->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms;
	int irq;
	struct dev_pm_opp *opp;
	int ret = 0;
	unsigned long max_freq = ULONG_MAX;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	ret = devm_pm_opp_set_clkname(dev, "core");
	if (ret)
		return ret;
	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev, "invalid OPP table in device tree\n");
		return ret;
	}

	ret = devm_clk_bulk_get_all(&pdev->dev, &dpu_kms->clocks);
	if (ret < 0) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->num_clocks = ret;

	opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	dev_pm_opp_set_rate(dev, max_freq);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		return ret;
	}
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;

	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (!irq) {
		DPU_ERROR("failed to get irq\n");
		return -EINVAL;
	}
	dpu_kms->base.irq = irq;

	return 0;
}

static int dpu_dev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, dpu_kms_init);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}

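/*
 * Runtime PM: suspend drops the performance state vote, disables the clocks
 * and zeroes the interconnect bandwidth votes; resume re-votes a minimum
 * bandwidth (MIN_IB_BW) before re-enabling the clocks, since the AXI clock
 * must not be turned on without a bus vote, then restores the VBIF memtype
 * settings and lets each encoder resume.
 */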
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(dpu_kms->num_clocks, dpu_kms->clocks);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return 0;
}

static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *encoder;
	struct drm_device *ddev;
	int i;

	ddev = dpu_kms->dev;

	WARN_ON(!(dpu_kms->num_paths));
	/* Min vote of BW is required before turning on AXI clk */
	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));

	rc = clk_bulk_prepare_enable(dpu_kms->num_clocks, dpu_kms->clocks);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}

static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	.prepare = msm_pm_prepare,
	.complete = msm_pm_complete,
};

static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,msm8998-dpu", },
	{ .compatible = "qcom,qcm2290-dpu", },
	{ .compatible = "qcom,sdm845-dpu", },
	{ .compatible = "qcom,sc7180-dpu", },
	{ .compatible = "qcom,sc7280-dpu", },
	{ .compatible = "qcom,sc8180x-dpu", },
	{ .compatible = "qcom,sm8150-dpu", },
	{ .compatible = "qcom,sm8250-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);

static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.shutdown = msm_drv_shutdown,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};

void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}