// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
#include <drm/drm_self_refresh_helper.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60
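
/*
 * CONVERT_S3_15() turns one drm_color_ctm coefficient (sign-magnitude S31.32
 * fixed point) into the value programmed into the DSPP PCC block: the sign
 * bit is dropped, the low fraction bits are shifted out and the result is
 * masked down to 18 bits.
 */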
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}
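
/*
 * CRC source handling for the debugfs crc/control interface: "auto" and "lm"
 * select the layer mixers' MISR blocks, "encoder" selects the interface MISR,
 * and "none" disables CRC generation.
 */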
static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
	if (!strcmp(src_name, "encoder"))
		return DPU_CRTC_CRC_SOURCE_ENCODER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source == DPU_CRTC_CRC_SOURCE_INVALID) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER) {
		*values_cnt = crtc_state->num_mixers;
	} else if (source == DPU_CRTC_CRC_SOURCE_ENCODER) {
		struct drm_encoder *drm_enc;

		*values_cnt = 0;

		drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
			*values_cnt += dpu_encoder_get_crc_values_cnt(drm_enc);
	}

	return 0;
}

static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	int i;

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm);
	}
}

static void dpu_crtc_setup_encoder_misr(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_setup_misr(drm_enc);
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
	bool was_enabled;
	bool enable = false;
	int ret = 0;

	if (source == DPU_CRTC_CRC_SOURCE_INVALID) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		dpu_crtc_setup_lm_misr(crtc_state);
	else if (source == DPU_CRTC_CRC_SOURCE_ENCODER)
		dpu_crtc_setup_encoder_misr(crtc);

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
		struct dpu_crtc_state *crtc_state)
{
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];
	int rc = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
		if (rc) {
			DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static int dpu_crtc_get_encoder_crc(struct drm_crtc *crtc)
{
	struct drm_encoder *drm_enc;
	int rc, pos = 0;
	u32 crcs[INTF_MAX];

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc->state->encoder_mask) {
		rc = dpu_encoder_get_crc(drm_enc, crcs, pos);
		if (rc < 0) {
			DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}

		pos += rc;
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		return dpu_crtc_get_lm_crc(crtc, crtc_state);
	else if (crtc_state->crc_source == DPU_CRTC_CRC_SOURCE_ENCODER)
		return dpu_crtc_get_encoder_crc(crtc);

	return -EINVAL;
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);
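
	/*
	 * Report the position relative to the first active line: negative
	 * while still in the blanking region before the active area, negative
	 * (counted back from the next frame's active start) while in the
	 * front porch, and 0-based within the active area itself.
	 */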
	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}
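
/*
 * Program the layer mixer blend operation for one plane stage. Opaque formats
 * and DRM_MODE_BLEND_PIXEL_NONE use constant foreground/background alpha;
 * premultiplied and coverage modes take the background alpha from the
 * foreground pixel alpha and additionally modulate by the plane's constant
 * alpha when it is not fully opaque.
 */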
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				DPU_BLEND_FG_INV_MOD_ALPHA |
				DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			&format->base.pixel_format, format->alpha_enable, blend_op);
}
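
/*
 * Program each layer mixer's output rectangle. With multiple mixers the CRTC
 * output is split horizontally and every visible lm_bounds entry gets its own
 * slice, with right_mixer numbering the slices from left to right.
 */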
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
					struct drm_plane *plane,
					struct dpu_crtc_mixer *mixer,
					u32 num_mixers,
					enum dpu_stage stage,
					struct dpu_format *format,
					uint64_t modifier,
					struct dpu_sw_pipe *pipe,
					unsigned int stage_idx,
					struct dpu_hw_stage_cfg *stage_cfg)
{
	uint32_t lm_idx;
	enum dpu_sspp sspp_idx;
	struct drm_plane_state *state;

	sspp_idx = pipe->sspp->idx;

	state = plane->state;

	trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
				   state, to_dpu_plane_state(state), stage_idx,
				   format->base.pixel_format,
				   modifier);

	DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
			 crtc->base.id,
			 stage,
			 plane->base.id,
			 sspp_idx - SSPP_NONE,
			 state->fb ? state->fb->base.id : -1,
			 pipe->multirect_index);

	stage_cfg->stage[stage][stage_idx] = sspp_idx;
	stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;

	/* blend config update */
	for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
		mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	uint32_t lm_idx;
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		set_bit(pstate->pipe.sspp->idx, fetch_active);
		_dpu_crtc_blend_setup_pipe(crtc, plane,
					   mixer, cstate->num_mixers,
					   pstate->stage,
					   format, fb ? fb->modifier : 0,
					   &pstate->pipe, 0, stage_cfg);

		if (pstate->r_pipe.sspp) {
			set_bit(pstate->r_pipe.sspp->idx, fetch_active);
			_dpu_crtc_blend_setup_pipe(crtc, plane,
						   mixer, cstate->num_mixers,
						   pstate->stage,
						   format, fb ? fb->modifier : 0,
						   &pstate->r_pipe, 1, stage_cfg);
		}

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate, format);

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		/* stage config flush mask */
		ctl->ops.update_pending_flush_mixer(ctl,
			mixer[i].hw_lm->idx);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This function signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}
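
/*
 * Called from the encoder's vblank interrupt handler: update the vblank
 * statistics, collect a CRC sample if a CRC source is configured, and
 * forward the event to drm_crtc_handle_vblank().
 */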
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this callback with the encoder for all frame events such as
 * frame_error, frame_done and idle_timeout. The encoder may invoke it from
 * different contexts - IRQ, user thread, commit_thread, etc. - so each event
 * should be carefully reviewed and processed in the proper task context to
 * avoid scheduling delays and to properly manage the IRQ context's bottom
 * half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0);
	_dpu_crtc_complete_flip(crtc);
}
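
/*
 * Split the adjusted mode horizontally across the allocated layer mixers:
 * each mixer gets an equal-width slice of the display, recorded in
 * cstate->lm_bounds and later programmed by _dpu_crtc_program_lm_output_roi().
 */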
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}
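
/*
 * Translate the DRM CTM property (a 3x3 colour transform in row-major S31.32
 * fixed point) into the DSPP PCC coefficient layout: matrix[0..2] are the
 * first row of the CTM (the red output's contributions from R, G and B),
 * matrix[3..5] the second row (green output) and matrix[6..8] the third row
 * (blue output).
 */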
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}
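
/*
 * Program the colour-processing blocks: when the colour management state
 * changed or the CRTC needs a modeset, push new PCC coefficients - or disable
 * PCC when no CTM is set - into every DSPP attached to this CRTC and mark the
 * DSPP for flush.
 */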
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		/* stage config flush mask */
		ctl->ops.update_pending_flush_dspp(ctl,
			mixer[i].hw_dspp->idx, DPU_DSPP_PCC);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * The PP_DONE irq is only used by command mode for now.
	 * It is better to request the pending interrupt before the FLUSH and
	 * START triggers so that no pp_done irq is missed.
	 * This is safe because no pp_done can happen before the SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before the crtc's
	 *                      "flush everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}
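
/*
 * Block until the encoder signals frame done for the previously kicked-off
 * frame, bounded by DPU_CRTC_FRAME_DONE_TIMEOUT_MS. Returns immediately if
 * no frame is pending.
 */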
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (!dpu_encoder_is_valid_for_commit(encoder)) {
			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
			goto end;
		}
	}

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);

end:
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_atomic_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
		if (cstate->mixers[i].hw_dspp)
			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
	}
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* If disable is triggered while in self refresh mode, reset the
	 * encoder software state so that the subsequent enable won't trigger
	 * a warning when assigning the crtc.
	 */
	if (old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev,
				old_crtc_state->encoder_mask) {
			dpu_encoder_assign_crtc(encoder, NULL);
		}
	}

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;

		/*
		 * If disable is triggered while PSR is active (e.g. screen
		 * dim in PSR), we need the encoder->crtc connection to
		 * process the device sleep, so preserve it during the PSR
		 * sequence.
		 */
		if (!crtc->state->self_refresh_active)
			dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;
	struct drm_crtc_state *old_crtc_state;

	old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	if (!old_crtc_state->self_refresh_active) {
		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
			dpu_encoder_assign_crtc(encoder, crtc);
	}

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}
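
/*
 * A CRTC needs the dirty-FB path when it is not in self refresh and at least
 * one of its encoders drives a command-mode interface, so that damage
 * reported by userspace actually results in frames being pushed to the panel.
 */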
static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	if (cstate->self_refresh_active)
		return false;

	drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) {
			return true;
		}
	}

	return false;
}

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;

	int rc = 0;

	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		return 0;
	}

	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	if (cstate->num_mixers)
		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);

	/* FIXME: move this to dpu_plane_atomic_check? */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			return rc;
		}

		if (!pstate->visible)
			continue;

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		return rc;
	}

	return 0;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
					fb->base.id, (char *) &fb->format->format,
					fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n\t");

			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
						fb->pitches[i]);
			seq_puts(s, "\n\t");

			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
						fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tsspp[0]:%s\n",
			pstate->pipe.sspp->cap->name);
		seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
			pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
		if (pstate->r_pipe.sspp) {
			seq_printf(s, "\tsspp[1]:%s\n",
				pstate->r_pipe.sspp->cap->name);
			seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
				pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index);
		}
	}

	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_create_file("status", 0400,
			crtc->debugfs_entry,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			crtc->debugfs_entry,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
			       struct drm_plane *cursor)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc;
	int i, ret;

	dpu_crtc = drmm_crtc_alloc_with_planes(dev, struct dpu_crtc, base,
					       plane, cursor,
					       &dpu_crtc_funcs,
					       NULL);

	if (IS_ERR(dpu_crtc))
		return ERR_CAST(dpu_crtc);

	crtc = &dpu_crtc->base;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
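
	/*
	 * Pre-allocate the frame event structures and park them on the free
	 * list; dpu_crtc_frame_event_cb() pulls entries from this list
	 * (possibly from IRQ context) and dpu_crtc_frame_event_work() returns
	 * them once processed.
	 */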
	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	if (dpu_kms->catalog->dspp_count)
		drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	ret = drm_self_refresh_helper_init(crtc);
	if (ret) {
		DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
			  crtc->name, ret);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);

	return crtc;
}