// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

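/*
 * Truncate a DRM CTM coefficient (S31.32 sign-magnitude fixed point) to the
 * 18-bit value programmed into the DSPP PCC block: the sign bit is dropped
 * and 3 integer plus 15 fractional bits are kept.
 */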
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	if (!crtc)
		return;

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		*values_cnt = crtc_state->num_mixers;

	return 0;
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
	struct dpu_crtc_mixer *m;
	bool was_enabled;
	bool enable = false;
	int i, ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
	}

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

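/*
 * Collect the MISR signature computed by each active layer mixer for the
 * previous frame and report it through the DRM CRC debugfs interface.
 */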
static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];

	int rc = 0;
	int i;

	crtc_state = to_dpu_crtc_state(crtc->state);

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
		if (rc) {
			DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

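/*
 * Report the current scanout line relative to the start of the active area,
 * as expected by the vblank timestamping helpers: the value is negative
 * while in the blanking region and 0..vdisplay-1 while the frame is being
 * scanned out.
 */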
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq,
					  int *vpos, int *hpos,
					  ktime_t *stime, ktime_t *etime,
					  const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */
	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

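/*
 * Program the blend rule for one plane on one layer mixer stage, derived
 * from the plane's pixel blend mode (none, premultiplied or coverage) and
 * its constant alpha.
 */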
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				DPU_BLEND_FG_INV_MOD_ALPHA |
				DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			 &format->base.pixel_format, format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This function signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode.
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call these events
 * from different contexts - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in the proper task
 * context to avoid scheduling delay or to properly manage the irq context's
 * bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

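/*
 * Translate the CRTC CTM (color transform matrix) property into the 3x3
 * coefficient set programmed into the DSPP PCC block.
 */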
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - DSPP_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

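/*
 * Wait for the frame-done event of the most recently kicked-off frame.
 * Returns 0 if nothing was pending or the completion fired in time, or a
 * negative error code if the wait timed out.
 */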
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

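/* scratch bookkeeping used by dpu_crtc_atomic_check() to validate plane staging */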
struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so its valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);

	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}

	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}