// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>

#include <drm/drm_crtc.h>
#include <drm/drm_file.h>
#include <drm/drm_vblank.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_hw_vbif.h"
#include "dpu_kms.h"
#include "dpu_encoder.h"
#include "dpu_plane.h"
#include "dpu_crtc.h"
#include "dpu_vbif.h"

#define CREATE_TRACE_POINTS
#include "dpu_trace.h"
/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"

#define MIN_IB_BW	400000000ULL	/* Minimum instantaneous bandwidth vote: 400 MB/s */
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
#ifdef CONFIG_DEBUG_FS
static int _dpu_danger_signal_status(struct seq_file *s,
		bool danger_status)
{
	struct dpu_kms *kms = (struct dpu_kms *)s->private;
	struct dpu_danger_safe_status status;
	int i;

	if (!kms->hw_mdp) {
		DPU_ERROR("invalid arg(s)\n");
		return 0;
	}

	memset(&status, 0, sizeof(struct dpu_danger_safe_status));

	pm_runtime_get_sync(&kms->pdev->dev);
	if (danger_status) {
		seq_puts(s, "\nDanger signal status:\n");
		if (kms->hw_mdp->ops.get_danger_status)
			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
					&status);
	} else {
		seq_puts(s, "\nSafe signal status:\n");
		if (kms->hw_mdp->ops.get_safe_status)
			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
					&status);
	}
	pm_runtime_put_sync(&kms->pdev->dev);

	seq_printf(s, "MDP     :  0x%x\n", status.mdp);

	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
		seq_printf(s, "SSPP%d   :  0x%x  \t", i - SSPP_VIG0,
				status.sspp[i]);
	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, true);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);

static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
{
	return _dpu_danger_signal_status(s, false);
}
DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	struct dentry *entry = debugfs_create_dir("danger", parent);

	debugfs_create_file("danger_status", 0600, entry,
			dpu_kms, &dpu_debugfs_danger_stats_fops);
	debugfs_create_file("safe_status", 0600, entry,
			dpu_kms, &dpu_debugfs_safe_stats_fops);
}
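/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and this is
 * DRM minor 0): the two files created above can simply be read to snapshot
 * the danger/safe signal state, e.g.
 *
 *	# cat /sys/kernel/debug/dri/0/debug/danger/danger_status
 *	# cat /sys/kernel/debug/dri/0/debug/danger/safe_status
 *
 * Each read takes a runtime PM reference around the register query, so the
 * hardware is powered up only for the duration of the read.
 */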
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
	struct dpu_debugfs_regset32 *regset = s->private;
	struct dpu_kms *dpu_kms = regset->dpu_kms;
	void __iomem *base;
	uint32_t i, addr;

	if (!dpu_kms->mmio)
		return 0;

	base = dpu_kms->mmio + regset->offset;

	/* insert padding spaces, if needed */
	if (regset->offset & 0xF) {
		seq_printf(s, "[%x]", regset->offset & ~0xF);
		for (i = 0; i < (regset->offset & 0xF); i += 4)
			seq_puts(s, "         ");
	}

	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	/* main register output */
	for (i = 0; i < regset->blk_len; i += 4) {
		addr = regset->offset + i;
		if ((addr & 0xF) == 0x0)
			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
		seq_printf(s, " %08x", readl_relaxed(base + i));
	}

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	seq_puts(s, "\n");

	return 0;
}

static int dpu_debugfs_open_regset32(struct inode *inode,
		struct file *file)
{
	return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
}

static const struct file_operations dpu_fops_regset32 = {
	.open =		dpu_debugfs_open_regset32,
	.read =		seq_read,
	.llseek =	seq_lseek,
	.release =	single_release,
};
void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
		uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
{
	if (regset) {
		regset->offset = offset;
		regset->blk_len = length;
		regset->dpu_kms = dpu_kms;
	}
}

void dpu_debugfs_create_regset32(const char *name, umode_t mode,
		void *parent, struct dpu_debugfs_regset32 *regset)
{
	if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
		return;

	/* make sure offset is a multiple of 4 */
	regset->offset = round_down(regset->offset, 4);

	debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
}
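/*
 * Illustrative (hypothetical) caller of the two helpers above; the name,
 * offset and length are made-up values, not taken from the hardware catalog:
 *
 *	static struct dpu_debugfs_regset32 blk_regset;
 *
 *	dpu_debugfs_setup_regset32(&blk_regset, 0x1000, 0x100, dpu_kms);
 *	dpu_debugfs_create_regset32("blk_regs", 0400, parent, &blk_regset);
 *
 * Reading the resulting file then dumps 0x100 bytes of registers starting at
 * offset 0x1000, four 32-bit words per output line.
 */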
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	void *p = dpu_hw_util_get_log_mask_ptr();
	struct dentry *entry;
	struct drm_device *dev;
	struct msm_drm_private *priv;

	if (!p)
		return -EINVAL;

	dev = dpu_kms->dev;
	priv = dev->dev_private;

	entry = debugfs_create_dir("debug", minor->debugfs_root);

	debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);

	dpu_debugfs_danger_init(dpu_kms, entry);
	dpu_debugfs_vbif_init(dpu_kms, entry);
	dpu_debugfs_core_irq_init(dpu_kms, entry);

	if (priv->dp)
		msm_dp_debugfs_init(priv->dp, minor);

	return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
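/*
 * The resulting debugfs layout under dri/<minor>/debug/ is, roughly:
 * hw_log_mask, the danger/ directory created above (danger_status and
 * safe_status), plus whatever entries the vbif, core_irq and core_perf
 * init calls register beneath the same "debug" dentry.
 */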
/* Global/shared object state funcs */

/*
 * This is a helper that returns the private state currently in operation.
 * Note that this would return the "old_state" if called in the atomic check
 * path, and the "new_state" after the atomic swap has been done.
 */
struct dpu_global_state *
dpu_kms_get_existing_global_state(struct dpu_kms *dpu_kms)
{
	return to_dpu_global_state(dpu_kms->global_state.state);
}

/*
 * This acquires the modeset lock set aside for global state, creates
 * a new duplicated private object state.
 */
struct dpu_global_state *dpu_kms_get_global_state(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&dpu_kms->global_state_lock, s->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(s,
						&dpu_kms->global_state);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_dpu_global_state(priv_state);
}
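/*
 * A minimal sketch of the expected caller pattern in atomic check code
 * (hypothetical; the actual resource bookkeeping is omitted):
 *
 *	struct dpu_global_state *global_state;
 *
 *	global_state = dpu_kms_get_global_state(crtc_state->state);
 *	if (IS_ERR(global_state))
 *		return PTR_ERR(global_state);
 *	... reserve or release shared HW blocks in global_state ...
 *
 * Note that drm_modeset_lock() above may fail with -EDEADLK; callers must
 * propagate that error so the atomic core can back off and retry the
 * transaction.
 */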
static struct drm_private_state *
dpu_kms_global_duplicate_state(struct drm_private_obj *obj)
{
	struct dpu_global_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void dpu_kms_global_destroy_state(struct drm_private_obj *obj,
		struct drm_private_state *state)
{
	struct dpu_global_state *dpu_state = to_dpu_global_state(state);

	kfree(dpu_state);
}

static const struct drm_private_state_funcs dpu_kms_global_state_funcs = {
	.atomic_duplicate_state = dpu_kms_global_duplicate_state,
	.atomic_destroy_state = dpu_kms_global_destroy_state,
};

static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
{
	struct dpu_global_state *state;

	drm_modeset_lock_init(&dpu_kms->global_state_lock);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(dpu_kms->dev, &dpu_kms->global_state,
				    &state->base,
				    &dpu_kms_global_state_funcs);
	return 0;
}
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct drm_device *dev = dpu_kms->dev;

	path0 = of_icc_get(dev->dev, "mdp0-mem");
	path1 = of_icc_get(dev->dev, "mdp1-mem");

	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	dpu_kms->path[0] = path0;
	dpu_kms->num_paths = 1;

	if (!IS_ERR_OR_NULL(path1)) {
		dpu_kms->path[1] = path1;
		dpu_kms->num_paths++;
	}

	return 0;
}
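/*
 * Both paths are looked up by name, i.e. they come from the device tree
 * "interconnects"/"interconnect-names" properties ("mdp0-mem" and, on SoCs
 * with a second memory port, "mdp1-mem"). path1 is optional by design:
 * only a missing or broken path0 is treated as an error.
 */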
static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	return dpu_crtc_vblank(crtc, true);
}

static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	dpu_crtc_vblank(crtc, false);
}

static void dpu_kms_enable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_get_sync(&dpu_kms->pdev->dev);
}

static void dpu_kms_disable_commit(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}

static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		ktime_t vsync_time;

		if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
			return vsync_time;
	}

	return ktime_get();
}
static void dpu_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_encoder *encoder;
	int i;

	if (!kms)
		return;

	/* Call prepare_commit for all affected encoders */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
					  crtc_state->encoder_mask) {
			dpu_encoder_prepare_commit(encoder);
		}
	}
}
static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;

		trace_dpu_kms_commit(DRMID(crtc));
		dpu_crtc_commit_kickoff(crtc);
	}
}
/*
 * Override the encoder enable since we need to setup the inline rotator and do
 * some crtc magic before enabling any bridge that might be present.
 */
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc;

	/* Forward this enable call to the commit hook */
	if (funcs && funcs->commit)
		funcs->commit(encoder);

	drm_for_each_crtc(crtc, dev) {
		if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
			continue;

		trace_dpu_kms_enc_enable(DRMID(crtc));
	}
}
static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	DPU_ATRACE_BEGIN("kms_complete_commit");

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_crtc_complete_commit(crtc);

	DPU_ATRACE_END("kms_complete_commit");
}
static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state) {
		DPU_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
		ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			DPU_ERROR("wait for commit done returned %d\n", ret);
			break;
		}
	}
}
static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct drm_crtc *crtc;

	for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
		dpu_kms_wait_for_commit_done(kms, crtc);
}
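/*
 * Taken together with dpu_kms_enable_commit()/dpu_kms_disable_commit() above,
 * the msm atomic helper drives these hooks in roughly this order per commit
 * (a sketch of the intended flow, not an exhaustive contract):
 *
 *	enable_commit()    -> pm_runtime_get_sync()
 *	prepare_commit()   -> per-encoder prepare
 *	flush_commit()     -> dpu_crtc_commit_kickoff() on each active crtc
 *	wait_flush()       -> wait for MSM_ENC_COMMIT_DONE on each encoder
 *	complete_commit()  -> per-crtc completion bookkeeping
 *	disable_commit()   -> pm_runtime_put_sync()
 */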
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	int i, rc = 0;

	if (!(priv->dsi[0] || priv->dsi[1]))
		return rc;

	/* TODO: Support two independent DSI connectors */
	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for dsi display\n");
		return PTR_ERR(encoder);
	}

	priv->encoders[priv->num_encoders++] = encoder;

	for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
		if (!priv->dsi[i])
			continue;

		rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
		if (rc) {
			DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
				i, rc);
			break;
		}
	}

	return rc;
}
static int _dpu_kms_initialize_displayport(struct drm_device *dev,
					   struct msm_drm_private *priv,
					   struct dpu_kms *dpu_kms)
{
	struct drm_encoder *encoder = NULL;
	int rc = 0;

	if (!priv->dp)
		return rc;

	encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
	if (IS_ERR(encoder)) {
		DPU_ERROR("encoder init failed for dp display\n");
		return PTR_ERR(encoder);
	}

	rc = msm_dp_modeset_init(priv->dp, dev, encoder);
	if (rc) {
		DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
		drm_encoder_cleanup(encoder);
		return rc;
	}

	priv->encoders[priv->num_encoders++] = encoder;

	return rc;
}
/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	int rc = 0;

	rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
		return rc;
	}

	rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
	if (rc) {
		DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
		return rc;
	}

	return rc;
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_drm_private *priv;
	int i;

	priv = dpu_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;
}
static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
	struct drm_crtc *crtc;

	struct msm_drm_private *priv;
	struct dpu_mdss_cfg *catalog;

	int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
	int max_crtc_count;

	dev = dpu_kms->dev;
	priv = dev->dev_private;
	catalog = dpu_kms->catalog;

	/*
	 * Create encoder and query display drivers to create
	 * bridges and connectors
	 */
	ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
	if (ret)
		goto fail;

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes, keeping track of one primary/cursor per crtc */
	for (i = 0; i < catalog->sspp_count; i++) {
		enum drm_plane_type type;

		if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
			&& cursor_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_CURSOR;
		else if (primary_planes_idx < max_crtc_count)
			type = DRM_PLANE_TYPE_PRIMARY;
		else
			type = DRM_PLANE_TYPE_OVERLAY;

		DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
			  type, catalog->sspp[i].features,
			  catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));

		plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
				       (1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			DPU_ERROR("dpu_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (type == DRM_PLANE_TYPE_CURSOR)
			cursor_planes[cursor_planes_idx++] = plane;
		else if (type == DRM_PLANE_TYPE_PRIMARY)
			primary_planes[primary_planes_idx++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_dpu_kms_drm_obj_destroy(dpu_kms);
	return ret;
}
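/*
 * Worked example of the sizing logic above, with illustrative numbers: if the
 * catalog exposes two mixers and two encoders were registered, max_crtc_count
 * is 2, so the first SSPPs become up to two cursor and two primary planes and
 * the rest become overlays; two CRTCs are then created and every encoder ends
 * up with possible_crtcs = 0x3.
 */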
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
{
	int i;

	if (dpu_kms->hw_intr)
		dpu_hw_intr_destroy(dpu_kms->hw_intr);
	dpu_kms->hw_intr = NULL;

	/* safe to call these more than once during shutdown */
	_dpu_kms_mmu_destroy(dpu_kms);

	if (dpu_kms->catalog) {
		for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
				dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
		}
	}

	if (dpu_kms->rm_init)
		dpu_rm_destroy(&dpu_kms->rm);
	dpu_kms->rm_init = false;

	if (dpu_kms->catalog)
		dpu_hw_catalog_deinit(dpu_kms->catalog);
	dpu_kms->catalog = NULL;

	if (dpu_kms->vbif[VBIF_NRT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
	dpu_kms->vbif[VBIF_NRT] = NULL;

	if (dpu_kms->vbif[VBIF_RT])
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
	dpu_kms->vbif[VBIF_RT] = NULL;

	if (dpu_kms->hw_mdp)
		dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
	dpu_kms->hw_mdp = NULL;

	if (dpu_kms->mmio)
		devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
	dpu_kms->mmio = NULL;
}
static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);

	msm_kms_destroy(&dpu_kms->base);
}
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
				      struct drm_encoder *encoder,
				      bool cmd_mode)
{
	struct msm_display_info info;
	struct msm_drm_private *priv = encoder->dev->dev_private;
	int i, rc = 0;

	memset(&info, 0, sizeof(info));

	info.intf_type = encoder->encoder_type;
	info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
			MSM_DISPLAY_CAP_VID_MODE;

	switch (info.intf_type) {
	case DRM_MODE_ENCODER_DSI:
		/* TODO: No support for DSI swap */
		for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
			if (!priv->dsi[i])
				continue;

			info.h_tile_instance[info.num_of_h_tiles] = i;
			info.num_of_h_tiles++;
		}
		break;
	case DRM_MODE_ENCODER_TMDS:
		info.num_of_h_tiles = 1;
		break;
	}

	rc = dpu_encoder_setup(encoder->dev, encoder, &info);
	if (rc)
		DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
			encoder->base.id, rc);
}
static irqreturn_t dpu_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	return dpu_core_irq(dpu_kms);
}

static void dpu_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	dpu_core_irq_preinstall(dpu_kms);
}

static int dpu_irq_postinstall(struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	if (!dpu_kms || !dpu_kms->dev)
		return -EINVAL;

	priv = dpu_kms->dev->dev_private;
	if (!priv)
		return -EINVAL;

	msm_dp_irq_postinstall(priv->dp);

	return 0;
}

static void dpu_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);

	dpu_core_irq_uninstall(dpu_kms);
}
static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = dpu_kms_hw_init,
	.irq_preinstall  = dpu_irq_preinstall,
	.irq_postinstall = dpu_irq_postinstall,
	.irq_uninstall   = dpu_irq_uninstall,
	.irq             = dpu_irq,
	.enable_commit   = dpu_kms_enable_commit,
	.disable_commit  = dpu_kms_disable_commit,
	.vsync_time      = dpu_kms_vsync_time,
	.prepare_commit  = dpu_kms_prepare_commit,
	.flush_commit    = dpu_kms_flush_commit,
	.wait_flush      = dpu_kms_wait_flush,
	.complete_commit = dpu_kms_complete_commit,
	.enable_vblank   = dpu_kms_enable_vblank,
	.disable_vblank  = dpu_kms_disable_vblank,
	.check_modified_format = dpu_format_check_modified_format,
	.get_format      = dpu_get_msm_format,
	.round_pixclk    = dpu_kms_round_pixclk,
	.destroy         = dpu_kms_destroy,
	.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init    = dpu_kms_debugfs_init,
#endif
};
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
	struct msm_mmu *mmu;

	if (!dpu_kms->base.aspace)
		return;

	mmu = dpu_kms->base.aspace->mmu;

	mmu->funcs->detach(mmu);
	msm_gem_address_space_put(dpu_kms->base.aspace);

	dpu_kms->base.aspace = NULL;
}
static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
	struct iommu_domain *domain;
	struct msm_gem_address_space *aspace;
	struct msm_mmu *mmu;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return 0;

	mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
	aspace = msm_gem_address_space_create(mmu, "dpu1",
		0x1000, 0x100000000 - 0x1000);

	if (IS_ERR(aspace)) {
		mmu->funcs->destroy(mmu);
		return PTR_ERR(aspace);
	}

	dpu_kms->base.aspace = aspace;

	return 0;
}
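/*
 * The address space created above spans IOVA 0x1000..0xffffffff: the size
 * argument is 0x100000000 - 0x1000, i.e. the full 32-bit range minus the
 * first page, which (presumably to keep a NULL-like IOVA unmappable) is
 * never handed out.
 */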
static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
		char *clock_name)
{
	struct dss_module_power *mp = &dpu_kms->mp;
	int i;

	for (i = 0; i < mp->num_clk; i++) {
		if (!strcmp(mp->clk_config[i].clk_name, clock_name))
			return &mp->clk_config[i];
	}

	return NULL;
}

u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
{
	struct dss_clk *clk;

	clk = _dpu_kms_get_clk(dpu_kms, clock_name);
	if (!clk)
		return -EINVAL;

	return clk_get_rate(clk->clk);
}
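/*
 * Hypothetical usage sketch: clocks are looked up by the names parsed from
 * the DT "clock-names" property via msm_dss_parse_clock() in dpu_bind(), e.g.
 *
 *	u64 rate = dpu_kms_get_clk_rate(dpu_kms, "core");
 *
 * Note that the -EINVAL error is folded into the u64 return value, so a
 * caller that may pass unknown clock names has to check for it explicitly.
 */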
static int dpu_kms_hw_init(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;
	struct drm_device *dev;
	int i, rc = -EINVAL;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return rc;
	}

	dpu_kms = to_dpu_kms(kms);
	dev = dpu_kms->dev;

	rc = dpu_kms_global_obj_init(dpu_kms);
	if (rc)
		return rc;

	atomic_set(&dpu_kms->bandwidth_ref, 0);

	dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
	if (IS_ERR(dpu_kms->mmio)) {
		rc = PTR_ERR(dpu_kms->mmio);
		DPU_ERROR("mdp register memory map failed: %d\n", rc);
		dpu_kms->mmio = NULL;
		goto error;
	}
	DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);

	dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
	if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
		DPU_ERROR("vbif register memory map failed: %d\n", rc);
		dpu_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}

	dpu_kms->vbif[VBIF_NRT] = msm_ioremap_quiet(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
	if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
		dpu_kms->vbif[VBIF_NRT] = NULL;
		DPU_DEBUG("VBIF NRT is not defined");
	}

	dpu_kms->reg_dma = msm_ioremap_quiet(dpu_kms->pdev, "regdma", "regdma");
	if (IS_ERR(dpu_kms->reg_dma)) {
		dpu_kms->reg_dma = NULL;
		DPU_DEBUG("REG_DMA is not defined");
	}

	dpu_kms_parse_data_bus_icc_path(dpu_kms);
	pm_runtime_get_sync(&dpu_kms->pdev->dev);

	dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);

	pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);

	dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
	if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
		rc = PTR_ERR(dpu_kms->catalog);
		if (!dpu_kms->catalog)
			rc = -EINVAL;
		DPU_ERROR("catalog init failed: %d\n", rc);
		dpu_kms->catalog = NULL;
		goto power_error;
	}

	/*
	 * Now we need to read the HW catalog and initialize resources such as
	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
	 */
	rc = _dpu_kms_mmu_init(dpu_kms);
	if (rc) {
		DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
	if (rc) {
		DPU_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	dpu_kms->rm_init = true;

	dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
					     dpu_kms->catalog);
	if (IS_ERR(dpu_kms->hw_mdp)) {
		rc = PTR_ERR(dpu_kms->hw_mdp);
		DPU_ERROR("failed to get hw_mdp: %d\n", rc);
		dpu_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = dpu_kms->catalog->vbif[i].id;

		dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
				dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
		if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
			if (!dpu_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			dpu_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
			_dpu_kms_get_clk(dpu_kms, "core"));
	if (rc) {
		DPU_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
	if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
		rc = PTR_ERR(dpu_kms->hw_intr);
		DPU_ERROR("hw_intr init failed: %d\n", rc);
		dpu_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	/*
	 * max crtc width is equal to the max mixer width * 2 and max height is
	 * 4K
	 */
	dev->mode_config.max_width =
			dpu_kms->catalog->caps->max_mixer_width * 2;
	dev->mode_config.max_height = 4096;

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	dev->max_vblank_count = 0xffffffff;
	/* Disable vblank irqs aggressively for power-saving */
	dev->vblank_disable_immediate = true;

	/*
	 * _dpu_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _dpu_kms_drm_obj_init(dpu_kms);
	if (rc) {
		DPU_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	return 0;

drm_obj_init_err:
	dpu_core_perf_destroy(&dpu_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
	_dpu_kms_hw_destroy(dpu_kms);

	return rc;
}
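/*
 * Error handling in dpu_kms_hw_init() follows the usual kernel unwind-label
 * pattern: everything that fails after pm_runtime_get_sync() jumps to a label
 * at or below power_error so the runtime PM reference is dropped exactly
 * once, and _dpu_kms_hw_destroy() is written to cope with partially
 * initialized state (each failure path NULLs the pointer it could not set
 * up).
 */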
struct msm_kms *dpu_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct dpu_kms *dpu_kms;
	int irq;

	if (!dev) {
		DPU_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;
	dpu_kms = to_dpu_kms(priv->kms);

	irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
	if (irq < 0) {
		DPU_ERROR("failed to get irq: %d\n", irq);
		return ERR_PTR(irq);
	}
	dpu_kms->base.irq = irq;

	return &dpu_kms->base;
}
static int dpu_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = ddev->dev_private;
	struct dpu_kms *dpu_kms;
	struct dss_module_power *mp;
	int ret = 0;

	dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
	if (!dpu_kms)
		return -ENOMEM;

	dpu_kms->opp_table = dev_pm_opp_set_clkname(dev, "core");
	if (IS_ERR(dpu_kms->opp_table))
		return PTR_ERR(dpu_kms->opp_table);
	/* OPP table is optional */
	ret = dev_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err(dev, "invalid OPP table in device tree\n");
		goto put_clkname;
	}

	mp = &dpu_kms->mp;
	ret = msm_dss_parse_clock(pdev, mp);
	if (ret) {
		DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
		goto err;
	}

	platform_set_drvdata(pdev, dpu_kms);

	ret = msm_kms_init(&dpu_kms->base, &kms_funcs);
	if (ret) {
		DPU_ERROR("failed to init kms, ret=%d\n", ret);
		goto err;
	}
	dpu_kms->dev = ddev;
	dpu_kms->pdev = pdev;

	pm_runtime_enable(&pdev->dev);
	dpu_kms->rpm_enabled = true;

	priv->kms = &dpu_kms->base;

	return ret;
err:
	dev_pm_opp_of_remove_table(dev);
put_clkname:
	dev_pm_opp_put_clkname(dpu_kms->opp_table);
	return ret;
}
static void dpu_unbind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
	struct dss_module_power *mp = &dpu_kms->mp;

	msm_dss_put_clk(mp->clk_config, mp->num_clk);
	devm_kfree(&pdev->dev, mp->clk_config);
	mp->num_clk = 0;

	if (dpu_kms->rpm_enabled)
		pm_runtime_disable(&pdev->dev);

	dev_pm_opp_of_remove_table(dev);
	dev_pm_opp_put_clkname(dpu_kms->opp_table);
}
static const struct component_ops dpu_ops = {
	.bind   = dpu_bind,
	.unbind = dpu_unbind,
};

static int dpu_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &dpu_ops);
}

static int dpu_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &dpu_ops);

	return 0;
}
static int __maybe_unused dpu_runtime_suspend(struct device *dev)
{
	int i, rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
	struct dss_module_power *mp = &dpu_kms->mp;

	/* Drop the performance state vote */
	dev_pm_opp_set_rate(dev, 0);
	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
	if (rc)
		DPU_ERROR("clock disable failed rc:%d\n", rc);

	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, 0);

	return rc;
}
static int __maybe_unused dpu_runtime_resume(struct device *dev)
{
	int rc = -1;
	struct platform_device *pdev = to_platform_device(dev);
	struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
	struct drm_encoder *encoder;
	struct drm_device *ddev;
	struct dss_module_power *mp = &dpu_kms->mp;
	int i;

	ddev = dpu_kms->dev;

	WARN_ON(!(dpu_kms->num_paths));
	/* Min vote of BW is required before turning on AXI clk */
	for (i = 0; i < dpu_kms->num_paths; i++)
		icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));

	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
	if (rc) {
		DPU_ERROR("clock enable failed rc:%d\n", rc);
		return rc;
	}

	dpu_vbif_init_memtypes(dpu_kms);

	drm_for_each_encoder(encoder, ddev)
		dpu_encoder_virt_runtime_resume(encoder);

	return rc;
}
static const struct dev_pm_ops dpu_pm_ops = {
	SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
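/*
 * These runtime PM callbacks pair with dpu_kms_enable_commit() /
 * dpu_kms_disable_commit() above: each atomic commit holds a runtime PM
 * reference for its duration, so the clocks and the interconnect bandwidth
 * vote are only kept up while the display pipeline actually has work in
 * flight (or while debugfs readers hold their own references).
 */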
static const struct of_device_id dpu_dt_match[] = {
	{ .compatible = "qcom,sdm845-dpu", },
	{ .compatible = "qcom,sc7180-dpu", },
	{ .compatible = "qcom,sc7280-dpu", },
	{ .compatible = "qcom,sm8150-dpu", },
	{ .compatible = "qcom,sm8250-dpu", },
	{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
static struct platform_driver dpu_driver = {
	.probe = dpu_dev_probe,
	.remove = dpu_dev_remove,
	.driver = {
		.name = "msm_dpu",
		.of_match_table = dpu_dt_match,
		.pm = &dpu_pm_ops,
	},
};
void __init msm_dpu_register(void)
{
	platform_driver_register(&dpu_driver);
}

void __exit msm_dpu_unregister(void)
{
	platform_driver_unregister(&dpu_driver);
}