// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

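/*
 * Convert a DRM CTM coefficient (S31.32 signed-magnitude fixed point) into
 * the 18-bit S3.15 value used by the PCC block below: the sign bit is
 * dropped and the lower 17 fractional bits are shifted out.
 */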
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	drm_crtc_cleanup(crtc);

	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_frame_count(encoder);
}

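/*
 * Translate the encoder's line counter into a scanout position for the
 * vblank timestamping helper: lines inside vactive become positive vpos
 * values, lines inside the vertical porches become negative ones.
 */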
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq,
					  int *vpos, int *hpos,
					  ktime_t *stime, ktime_t *etime,
					  const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

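/*
 * Program the blend operation for one layer mixer stage: constant (opaque)
 * foreground/background alpha when the format has no alpha channel,
 * per-pixel coverage blending when it does.
 */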
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			 &format->base.pixel_format, format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

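/*
 * Walk all planes staged on this crtc, fill in the stage configuration
 * (which SSPP feeds which blend stage and multirect slot) and accumulate
 * each mixer's flush mask and alpha-out mode.
 */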
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP that are cached in the dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

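/*
 * Deferred frame-event handler, run on the crtc's event worker thread:
 * decrements the pending-frame count, releases bandwidth when the last
 * pending frame completes, and wakes waiters of frame_done_comp.
 */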
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
 * from different context - IRQ, user thread, commit_thread, etc. Each event
 * should be carefully reviewed and should be processed in proper task context
 * to avoid scheduling delay or properly manage the irq context's bottom half
 * processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	_dpu_crtc_complete_flip(crtc);
}

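/*
 * Split the adjusted mode horizontally into equal-width slices, one per
 * layer mixer, and record the resulting bounds for ROI programming.
 */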
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

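/*
 * Repack the 3x3 CTM matrix from the crtc state into the per-channel
 * coefficient layout of the DSPP PCC block.
 */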
static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

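/*
 * Program the color processing blocks (currently just the DSPP PCC) when
 * color management state changed, and add them to the pending flush mask.
 */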
static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - DSPP_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

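/*
 * Flush-time hook: stash the pending event in dpu_crtc->event, update the
 * performance (clock/bandwidth) vote and give every plane a chance to
 * complete its register writes before kickoff.
 */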
static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

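/*
 * Wait up to DPU_CRTC_FRAME_DONE_TIMEOUT_MS for the frame-done completion
 * signalled by dpu_crtc_frame_event_work(); returns -ETIMEDOUT on timeout.
 */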
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

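/**
 * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
 * @crtc: Pointer to drm crtc structure
 */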
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

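/*
 * Validate the proposed crtc state: collect plane states, clip destinations
 * against the crtc, enforce per-stage plane limits and multirect rules,
 * check source-split ordering and run the core performance check.
 */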
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;
		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so its valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);

	return crtc;
}