2 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
20 #include <linux/sort.h>
21 #include <linux/debugfs.h>
22 #include <linux/ktime.h>
23 #include <drm/drm_mode.h>
24 #include <drm/drm_crtc.h>
25 #include <drm/drm_crtc_helper.h>
26 #include <drm/drm_flip_work.h>
27 #include <drm/drm_rect.h>
30 #include "dpu_hw_lm.h"
31 #include "dpu_hw_ctl.h"
33 #include "dpu_plane.h"
34 #include "dpu_encoder.h"
36 #include "dpu_core_perf.h"
37 #include "dpu_trace.h"
39 #define DPU_DRM_BLEND_OP_NOT_DEFINED 0
40 #define DPU_DRM_BLEND_OP_OPAQUE 1
41 #define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
42 #define DPU_DRM_BLEND_OP_COVERAGE 3
43 #define DPU_DRM_BLEND_OP_MAX 4
45 /* layer mixer index on dpu_crtc */
49 static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
50 struct drm_display_mode *mode)
52 return mode->hdisplay / cstate->num_mixers;
55 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
57 struct msm_drm_private *priv = crtc->dev->dev_private;
59 return to_dpu_kms(priv->kms);
62 static void dpu_crtc_destroy(struct drm_crtc *crtc)
64 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
71 drm_crtc_cleanup(crtc);
72 mutex_destroy(&dpu_crtc->crtc_lock);
76 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
77 struct dpu_plane_state *pstate, struct dpu_format *format)
79 struct dpu_hw_mixer *lm = mixer->hw_lm;
81 struct drm_format_name_buf format_name;
83 /* default to opaque blending */
84 blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
85 DPU_BLEND_BG_ALPHA_BG_CONST;
87 if (format->alpha_enable) {
88 /* coverage blending */
89 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
90 DPU_BLEND_BG_ALPHA_FG_PIXEL |
91 DPU_BLEND_BG_INV_ALPHA;
94 lm->ops.setup_blend_config(lm, pstate->stage,
97 DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
98 drm_get_format_name(format->base.pixel_format, &format_name),
99 format->alpha_enable, blend_op);
102 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
104 struct dpu_crtc *dpu_crtc;
105 struct dpu_crtc_state *crtc_state;
106 int lm_idx, lm_horiz_position;
108 dpu_crtc = to_dpu_crtc(crtc);
109 crtc_state = to_dpu_crtc_state(crtc->state);
111 lm_horiz_position = 0;
112 for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
113 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
114 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
115 struct dpu_hw_mixer_cfg cfg;
117 if (!lm_roi || !drm_rect_visible(lm_roi))
120 cfg.out_width = drm_rect_width(lm_roi);
121 cfg.out_height = drm_rect_height(lm_roi);
122 cfg.right_mixer = lm_horiz_position++;
124 hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
128 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
129 struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
131 struct drm_plane *plane;
132 struct drm_framebuffer *fb;
133 struct drm_plane_state *state;
134 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
135 struct dpu_plane_state *pstate = NULL;
136 struct dpu_format *format;
137 struct dpu_hw_ctl *ctl = mixer->lm_ctl;
138 struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
141 uint32_t stage_idx, lm_idx;
142 int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
143 bool bg_alpha_enable = false;
145 drm_atomic_crtc_for_each_plane(plane, crtc) {
146 state = plane->state;
150 pstate = to_dpu_plane_state(state);
153 dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
155 DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
159 dpu_plane_pipe(plane) - SSPP_VIG0,
160 state->fb ? state->fb->base.id : -1);
162 format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
164 if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
165 bg_alpha_enable = true;
167 stage_idx = zpos_cnt[pstate->stage]++;
168 stage_cfg->stage[pstate->stage][stage_idx] =
169 dpu_plane_pipe(plane);
170 stage_cfg->multirect_index[pstate->stage][stage_idx] =
171 pstate->multirect_index;
173 trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
174 state, pstate, stage_idx,
175 dpu_plane_pipe(plane) - SSPP_VIG0,
176 format->base.pixel_format,
177 fb ? fb->modifier : 0);
179 /* blend config update */
180 for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
181 _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
184 mixer[lm_idx].flush_mask |= flush_mask;
186 if (bg_alpha_enable && !format->alpha_enable)
187 mixer[lm_idx].mixer_op_mode = 0;
189 mixer[lm_idx].mixer_op_mode |=
194 _dpu_crtc_program_lm_output_roi(crtc);
198 * _dpu_crtc_blend_setup - configure crtc mixers
199 * @crtc: Pointer to drm crtc structure
201 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
203 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
204 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
205 struct dpu_crtc_mixer *mixer = cstate->mixers;
206 struct dpu_hw_ctl *ctl;
207 struct dpu_hw_mixer *lm;
210 DPU_DEBUG("%s\n", dpu_crtc->name);
212 for (i = 0; i < cstate->num_mixers; i++) {
213 if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
214 DPU_ERROR("invalid lm or ctl assigned to mixer\n");
217 mixer[i].mixer_op_mode = 0;
218 mixer[i].flush_mask = 0;
219 if (mixer[i].lm_ctl->ops.clear_all_blendstages)
220 mixer[i].lm_ctl->ops.clear_all_blendstages(
224 /* initialize stage cfg */
225 memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
227 _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
229 for (i = 0; i < cstate->num_mixers; i++) {
230 ctl = mixer[i].lm_ctl;
233 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
235 mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
236 mixer[i].hw_lm->idx);
238 /* stage config flush mask */
239 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
241 DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
242 mixer[i].hw_lm->idx - LM_0,
243 mixer[i].mixer_op_mode,
245 mixer[i].flush_mask);
247 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
248 &dpu_crtc->stage_cfg);
253 * _dpu_crtc_complete_flip - signal pending page_flip events
254 * Any pending vblank events are added to the vblank_event_list
255 * so that the next vblank interrupt shall signal them.
256 * However PAGE_FLIP events are not handled through the vblank_event_list.
257 * This API signals any pending PAGE_FLIP events requested through
258 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
259 * @crtc: Pointer to drm crtc structure
261 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
263 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
264 struct drm_device *dev = crtc->dev;
267 spin_lock_irqsave(&dev->event_lock, flags);
268 if (dpu_crtc->event) {
269 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
271 trace_dpu_crtc_complete_flip(DRMID(crtc));
272 drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
273 dpu_crtc->event = NULL;
275 spin_unlock_irqrestore(&dev->event_lock, flags);
278 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
280 struct drm_encoder *encoder;
282 if (!crtc || !crtc->dev) {
283 DPU_ERROR("invalid crtc\n");
284 return INTF_MODE_NONE;
287 drm_for_each_encoder(encoder, crtc->dev)
288 if (encoder->crtc == crtc)
289 return dpu_encoder_get_intf_mode(encoder);
291 return INTF_MODE_NONE;
294 static void dpu_crtc_vblank_cb(void *data)
296 struct drm_crtc *crtc = (struct drm_crtc *)data;
297 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
299 /* keep statistics on vblank callback - with auto reset via debugfs */
300 if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
301 dpu_crtc->vblank_cb_time = ktime_get();
303 dpu_crtc->vblank_cb_count++;
304 _dpu_crtc_complete_flip(crtc);
305 drm_crtc_handle_vblank(crtc);
306 trace_dpu_crtc_vblank_cb(DRMID(crtc));
309 static void dpu_crtc_frame_event_work(struct kthread_work *work)
311 struct dpu_crtc_frame_event *fevent = container_of(work,
312 struct dpu_crtc_frame_event, work);
313 struct drm_crtc *crtc = fevent->crtc;
314 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
316 bool frame_done = false;
318 DPU_ATRACE_BEGIN("crtc_frame_event");
320 DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
321 ktime_to_ns(fevent->ts));
323 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
324 | DPU_ENCODER_FRAME_EVENT_ERROR
325 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
327 if (atomic_read(&dpu_crtc->frame_pending) < 1) {
328 /* this should not happen */
329 DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
332 ktime_to_ns(fevent->ts),
333 atomic_read(&dpu_crtc->frame_pending));
334 } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
335 /* release bandwidth and other resources */
336 trace_dpu_crtc_frame_event_done(DRMID(crtc),
338 dpu_core_perf_crtc_release_bw(crtc);
340 trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
344 if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
345 dpu_core_perf_crtc_update(crtc, 0, false);
347 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
348 | DPU_ENCODER_FRAME_EVENT_ERROR))
352 if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
353 DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
354 crtc->base.id, ktime_to_ns(fevent->ts));
357 complete_all(&dpu_crtc->frame_done_comp);
359 spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
360 list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
361 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
362 DPU_ATRACE_END("crtc_frame_event");
366 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
367 * registers this API to encoder for all frame event callbacks like
368 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
369 * from different context - IRQ, user thread, commit_thread, etc. Each event
370 * should be carefully reviewed and should be processed in proper task context
371 * to avoid schedulin delay or properly manage the irq context's bottom half
374 static void dpu_crtc_frame_event_cb(void *data, u32 event)
376 struct drm_crtc *crtc = (struct drm_crtc *)data;
377 struct dpu_crtc *dpu_crtc;
378 struct msm_drm_private *priv;
379 struct dpu_crtc_frame_event *fevent;
383 /* Nothing to do on idle event */
384 if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
387 dpu_crtc = to_dpu_crtc(crtc);
388 priv = crtc->dev->dev_private;
389 crtc_id = drm_crtc_index(crtc);
391 trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
393 spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
394 fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
395 struct dpu_crtc_frame_event, list);
397 list_del_init(&fevent->list);
398 spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
401 DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
405 fevent->event = event;
407 fevent->ts = ktime_get();
408 kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
411 void dpu_crtc_complete_commit(struct drm_crtc *crtc,
412 struct drm_crtc_state *old_state)
414 if (!crtc || !crtc->state) {
415 DPU_ERROR("invalid crtc\n");
418 trace_dpu_crtc_complete_commit(DRMID(crtc));
421 static void _dpu_crtc_setup_mixer_for_encoder(
422 struct drm_crtc *crtc,
423 struct drm_encoder *enc)
425 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
426 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
427 struct dpu_rm *rm = &dpu_kms->rm;
428 struct dpu_crtc_mixer *mixer;
429 struct dpu_hw_ctl *last_valid_ctl = NULL;
431 struct dpu_rm_hw_iter lm_iter, ctl_iter;
433 dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
434 dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
436 /* Set up all the mixers and ctls reserved by this encoder */
437 for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
438 mixer = &cstate->mixers[i];
440 if (!dpu_rm_get_hw(rm, &lm_iter))
442 mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
444 /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
445 if (!dpu_rm_get_hw(rm, &ctl_iter)) {
446 DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
447 mixer->hw_lm->idx - LM_0);
448 mixer->lm_ctl = last_valid_ctl;
450 mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
451 last_valid_ctl = mixer->lm_ctl;
454 /* Shouldn't happen, mixers are always >= ctls */
455 if (!mixer->lm_ctl) {
456 DPU_ERROR("no valid ctls found for lm %d\n",
457 mixer->hw_lm->idx - LM_0);
461 mixer->encoder = enc;
463 cstate->num_mixers++;
464 DPU_DEBUG("setup mixer %d: lm %d\n",
465 i, mixer->hw_lm->idx - LM_0);
466 DPU_DEBUG("setup mixer %d: ctl %d\n",
467 i, mixer->lm_ctl->idx - CTL_0);
471 static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
473 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
474 struct drm_encoder *enc;
476 mutex_lock(&dpu_crtc->crtc_lock);
477 /* Check for mixers on all encoders attached to this crtc */
478 list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
479 if (enc->crtc != crtc)
482 _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
485 mutex_unlock(&dpu_crtc->crtc_lock);
488 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
489 struct drm_crtc_state *state)
491 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
492 struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
493 struct drm_display_mode *adj_mode = &state->adjusted_mode;
494 u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
497 for (i = 0; i < cstate->num_mixers; i++) {
498 struct drm_rect *r = &cstate->lm_bounds[i];
499 r->x1 = crtc_split_width * i;
501 r->x2 = r->x1 + crtc_split_width;
502 r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
504 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
507 drm_mode_debug_printmodeline(adj_mode);
510 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
511 struct drm_crtc_state *old_state)
513 struct dpu_crtc *dpu_crtc;
514 struct dpu_crtc_state *cstate;
515 struct drm_encoder *encoder;
516 struct drm_device *dev;
518 struct dpu_crtc_smmu_state_data *smmu_state;
521 DPU_ERROR("invalid crtc\n");
525 if (!crtc->state->enable) {
526 DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
527 crtc->base.id, crtc->state->enable);
531 DPU_DEBUG("crtc%d\n", crtc->base.id);
533 dpu_crtc = to_dpu_crtc(crtc);
534 cstate = to_dpu_crtc_state(crtc->state);
536 smmu_state = &dpu_crtc->smmu_state;
538 if (!cstate->num_mixers) {
539 _dpu_crtc_setup_mixers(crtc);
540 _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
543 if (dpu_crtc->event) {
544 WARN_ON(dpu_crtc->event);
546 spin_lock_irqsave(&dev->event_lock, flags);
547 dpu_crtc->event = crtc->state->event;
548 crtc->state->event = NULL;
549 spin_unlock_irqrestore(&dev->event_lock, flags);
552 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
553 if (encoder->crtc != crtc)
556 /* encoder will trigger pending mask now */
557 dpu_encoder_trigger_kickoff_pending(encoder);
561 * If no mixers have been allocated in dpu_crtc_atomic_check(),
562 * it means we are trying to flush a CRTC whose state is disabled:
563 * nothing else needs to be done.
565 if (unlikely(!cstate->num_mixers))
568 _dpu_crtc_blend_setup(crtc);
571 * PP_DONE irq is only used by command mode for now.
572 * It is better to request pending before FLUSH and START trigger
573 * to make sure no pp_done irq missed.
574 * This is safe because no pp_done will happen before SW trigger
579 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
580 struct drm_crtc_state *old_crtc_state)
582 struct dpu_crtc *dpu_crtc;
583 struct drm_device *dev;
584 struct drm_plane *plane;
585 struct msm_drm_private *priv;
586 struct msm_drm_thread *event_thread;
588 struct dpu_crtc_state *cstate;
590 if (!crtc->state->enable) {
591 DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
592 crtc->base.id, crtc->state->enable);
596 DPU_DEBUG("crtc%d\n", crtc->base.id);
598 dpu_crtc = to_dpu_crtc(crtc);
599 cstate = to_dpu_crtc_state(crtc->state);
601 priv = dev->dev_private;
603 if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
604 DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
608 event_thread = &priv->event_thread[crtc->index];
610 if (dpu_crtc->event) {
611 DPU_DEBUG("already received dpu_crtc->event\n");
613 spin_lock_irqsave(&dev->event_lock, flags);
614 dpu_crtc->event = crtc->state->event;
615 crtc->state->event = NULL;
616 spin_unlock_irqrestore(&dev->event_lock, flags);
620 * If no mixers has been allocated in dpu_crtc_atomic_check(),
621 * it means we are trying to flush a CRTC whose state is disabled:
622 * nothing else needs to be done.
624 if (unlikely(!cstate->num_mixers))
628 * For planes without commit update, drm framework will not add
629 * those planes to current state since hardware update is not
630 * required. However, if those planes were power collapsed since
631 * last commit cycle, driver has to restore the hardware state
632 * of those planes explicitly here prior to plane flush.
634 drm_atomic_crtc_for_each_plane(plane, crtc)
635 dpu_plane_restore(plane);
637 /* update performance setting before crtc kickoff */
638 dpu_core_perf_crtc_update(crtc, 1, false);
641 * Final plane updates: Give each plane a chance to complete all
642 * required writes/flushing before crtc's "flush
643 * everything" call below.
645 drm_atomic_crtc_for_each_plane(plane, crtc) {
646 if (dpu_crtc->smmu_state.transition_error)
647 dpu_plane_set_error(plane, true);
648 dpu_plane_flush(plane);
651 /* Kickoff will be scheduled by outer layer */
655 * dpu_crtc_destroy_state - state destroy hook
657 * @state: CRTC state object to release
659 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
660 struct drm_crtc_state *state)
662 struct dpu_crtc *dpu_crtc;
663 struct dpu_crtc_state *cstate;
665 if (!crtc || !state) {
666 DPU_ERROR("invalid argument(s)\n");
670 dpu_crtc = to_dpu_crtc(crtc);
671 cstate = to_dpu_crtc_state(state);
673 DPU_DEBUG("crtc%d\n", crtc->base.id);
675 __drm_atomic_helper_crtc_destroy_state(state);
680 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
682 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
685 if (!atomic_read(&dpu_crtc->frame_pending)) {
686 DPU_DEBUG("no frames pending\n");
690 DPU_ATRACE_BEGIN("frame done completion wait");
691 ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
692 msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
694 DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
697 DPU_ATRACE_END("frame done completion wait");
702 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
704 struct drm_encoder *encoder;
705 struct drm_device *dev = crtc->dev;
706 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
707 struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
708 struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
712 * If no mixers has been allocated in dpu_crtc_atomic_check(),
713 * it means we are trying to start a CRTC whose state is disabled:
714 * nothing else needs to be done.
716 if (unlikely(!cstate->num_mixers))
719 DPU_ATRACE_BEGIN("crtc_commit");
721 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
722 struct dpu_encoder_kickoff_params params = { 0 };
724 if (encoder->crtc != crtc)
728 * Encoder will flush/start now, unless it has a tx pending.
729 * If so, it may delay and flush at an irq event (e.g. ppdone)
731 dpu_encoder_prepare_for_kickoff(encoder, ¶ms, async);
736 /* wait for frame_event_done completion */
737 DPU_ATRACE_BEGIN("wait_for_frame_done_event");
738 ret = _dpu_crtc_wait_for_frame_done(crtc);
739 DPU_ATRACE_END("wait_for_frame_done_event");
741 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
743 atomic_read(&dpu_crtc->frame_pending));
747 if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
748 /* acquire bandwidth and other resources */
749 DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
751 DPU_DEBUG("crtc%d commit\n", crtc->base.id);
753 dpu_crtc->play_count++;
756 dpu_vbif_clear_errors(dpu_kms);
758 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
759 if (encoder->crtc != crtc)
762 dpu_encoder_kickoff(encoder, async);
767 reinit_completion(&dpu_crtc->frame_done_comp);
768 DPU_ATRACE_END("crtc_commit");
772 * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
773 * @dpu_crtc: Pointer to dpu crtc structure
774 * @enable: Whether to enable/disable vblanks
776 static void _dpu_crtc_vblank_enable_no_lock(
777 struct dpu_crtc *dpu_crtc, bool enable)
779 struct drm_crtc *crtc = &dpu_crtc->base;
780 struct drm_device *dev = crtc->dev;
781 struct drm_encoder *enc;
784 pm_runtime_get_sync(dev->dev);
786 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
787 if (enc->crtc != crtc)
790 trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
794 dpu_encoder_register_vblank_callback(enc,
795 dpu_crtc_vblank_cb, (void *)crtc);
798 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
799 if (enc->crtc != crtc)
802 trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
806 dpu_encoder_register_vblank_callback(enc, NULL, NULL);
809 pm_runtime_put_sync(dev->dev);
813 static void dpu_crtc_reset(struct drm_crtc *crtc)
815 struct dpu_crtc_state *cstate;
818 dpu_crtc_destroy_state(crtc, crtc->state);
820 crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
822 crtc->state->crtc = crtc;
826 * dpu_crtc_duplicate_state - state duplicate hook
827 * @crtc: Pointer to drm crtc structure
828 * @Returns: Pointer to new drm_crtc_state structure
830 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
832 struct dpu_crtc *dpu_crtc;
833 struct dpu_crtc_state *cstate, *old_cstate;
835 if (!crtc || !crtc->state) {
836 DPU_ERROR("invalid argument(s)\n");
840 dpu_crtc = to_dpu_crtc(crtc);
841 old_cstate = to_dpu_crtc_state(crtc->state);
842 cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
844 DPU_ERROR("failed to allocate state\n");
848 /* duplicate base helper */
849 __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
851 return &cstate->base;
854 static void dpu_crtc_disable(struct drm_crtc *crtc)
856 struct dpu_crtc *dpu_crtc;
857 struct dpu_crtc_state *cstate;
858 struct drm_display_mode *mode;
859 struct drm_encoder *encoder;
860 struct msm_drm_private *priv;
863 if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
864 DPU_ERROR("invalid crtc\n");
867 dpu_crtc = to_dpu_crtc(crtc);
868 cstate = to_dpu_crtc_state(crtc->state);
869 mode = &cstate->base.adjusted_mode;
870 priv = crtc->dev->dev_private;
872 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
874 /* Disable/save vblank irq handling */
875 drm_crtc_vblank_off(crtc);
877 mutex_lock(&dpu_crtc->crtc_lock);
879 /* wait for frame_event_done completion */
880 if (_dpu_crtc_wait_for_frame_done(crtc))
881 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
883 atomic_read(&dpu_crtc->frame_pending));
885 trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
886 if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
887 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
889 dpu_crtc->enabled = false;
891 if (atomic_read(&dpu_crtc->frame_pending)) {
892 trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
893 atomic_read(&dpu_crtc->frame_pending));
894 dpu_core_perf_crtc_release_bw(crtc);
895 atomic_set(&dpu_crtc->frame_pending, 0);
898 dpu_core_perf_crtc_update(crtc, 0, true);
900 drm_for_each_encoder(encoder, crtc->dev) {
901 if (encoder->crtc != crtc)
903 dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
906 memset(cstate->mixers, 0, sizeof(cstate->mixers));
907 cstate->num_mixers = 0;
909 /* disable clk & bw control until clk & bw properties are set */
910 cstate->bw_control = false;
911 cstate->bw_split_vote = false;
913 mutex_unlock(&dpu_crtc->crtc_lock);
915 if (crtc->state->event && !crtc->state->active) {
916 spin_lock_irqsave(&crtc->dev->event_lock, flags);
917 drm_crtc_send_vblank_event(crtc, crtc->state->event);
918 crtc->state->event = NULL;
919 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
923 static void dpu_crtc_enable(struct drm_crtc *crtc,
924 struct drm_crtc_state *old_crtc_state)
926 struct dpu_crtc *dpu_crtc;
927 struct drm_encoder *encoder;
928 struct msm_drm_private *priv;
930 if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
931 DPU_ERROR("invalid crtc\n");
934 priv = crtc->dev->dev_private;
936 DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
937 dpu_crtc = to_dpu_crtc(crtc);
939 drm_for_each_encoder(encoder, crtc->dev) {
940 if (encoder->crtc != crtc)
942 dpu_encoder_register_frame_event_callback(encoder,
943 dpu_crtc_frame_event_cb, (void *)crtc);
946 mutex_lock(&dpu_crtc->crtc_lock);
947 trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
948 if (!dpu_crtc->enabled && dpu_crtc->vblank_requested) {
949 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
951 dpu_crtc->enabled = true;
953 mutex_unlock(&dpu_crtc->crtc_lock);
955 /* Enable/restore vblank irq handling */
956 drm_crtc_vblank_on(crtc);
960 struct dpu_plane_state *dpu_pstate;
961 const struct drm_plane_state *drm_pstate;
966 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
967 struct drm_crtc_state *state)
969 struct dpu_crtc *dpu_crtc;
970 struct plane_state *pstates;
971 struct dpu_crtc_state *cstate;
973 const struct drm_plane_state *pstate;
974 struct drm_plane *plane;
975 struct drm_display_mode *mode;
977 int cnt = 0, rc = 0, mixer_width, i, z_pos;
979 struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
980 int multirect_count = 0;
981 const struct drm_plane_state *pipe_staged[SSPP_MAX];
982 int left_zpos_cnt = 0, right_zpos_cnt = 0;
983 struct drm_rect crtc_rect = { 0 };
986 DPU_ERROR("invalid crtc\n");
990 pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
992 dpu_crtc = to_dpu_crtc(crtc);
993 cstate = to_dpu_crtc_state(state);
995 if (!state->enable || !state->active) {
996 DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
997 crtc->base.id, state->enable, state->active);
1001 mode = &state->adjusted_mode;
1002 DPU_DEBUG("%s: check", dpu_crtc->name);
1004 /* force a full mode set if active state changed */
1005 if (state->active_changed)
1006 state->mode_changed = true;
1008 memset(pipe_staged, 0, sizeof(pipe_staged));
1010 mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
1012 _dpu_crtc_setup_lm_bounds(crtc, state);
1014 crtc_rect.x2 = mode->hdisplay;
1015 crtc_rect.y2 = mode->vdisplay;
1017 /* get plane state for all drm planes associated with crtc state */
1018 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
1019 struct drm_rect dst, clip = crtc_rect;
1021 if (IS_ERR_OR_NULL(pstate)) {
1022 rc = PTR_ERR(pstate);
1023 DPU_ERROR("%s: failed to get plane%d state, %d\n",
1024 dpu_crtc->name, plane->base.id, rc);
1027 if (cnt >= DPU_STAGE_MAX * 4)
1030 pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
1031 pstates[cnt].drm_pstate = pstate;
1032 pstates[cnt].stage = pstate->normalized_zpos;
1033 pstates[cnt].pipe_id = dpu_plane_pipe(plane);
1035 if (pipe_staged[pstates[cnt].pipe_id]) {
1036 multirect_plane[multirect_count].r0 =
1037 pipe_staged[pstates[cnt].pipe_id];
1038 multirect_plane[multirect_count].r1 = pstate;
1041 pipe_staged[pstates[cnt].pipe_id] = NULL;
1043 pipe_staged[pstates[cnt].pipe_id] = pstate;
1048 dst = drm_plane_state_dest(pstate);
1049 if (!drm_rect_intersect(&clip, &dst)) {
1050 DPU_ERROR("invalid vertical/horizontal destination\n");
1051 DPU_ERROR("display: " DRM_RECT_FMT " plane: "
1052 DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
1053 DRM_RECT_ARG(&dst));
1059 for (i = 1; i < SSPP_MAX; i++) {
1060 if (pipe_staged[i]) {
1061 dpu_plane_clear_multirect(pipe_staged[i]);
1063 if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
1065 "r1 only virt plane:%d not supported\n",
1066 pipe_staged[i]->plane->base.id);
1074 for (i = 0; i < cnt; i++) {
1075 /* reset counts at every new blend stage */
1076 if (pstates[i].stage != z_pos) {
1079 z_pos = pstates[i].stage;
1082 /* verify z_pos setting before using it */
1083 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
1084 DPU_ERROR("> %d plane stages assigned\n",
1085 DPU_STAGE_MAX - DPU_STAGE_0);
1088 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
1089 if (left_zpos_cnt == 2) {
1090 DPU_ERROR("> 2 planes @ stage %d on left\n",
1098 if (right_zpos_cnt == 2) {
1099 DPU_ERROR("> 2 planes @ stage %d on right\n",
1107 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
1108 DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
1111 for (i = 0; i < multirect_count; i++) {
1112 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
1114 "multirect validation failed for planes (%d - %d)\n",
1115 multirect_plane[i].r0->plane->base.id,
1116 multirect_plane[i].r1->plane->base.id);
1122 rc = dpu_core_perf_crtc_check(crtc, state);
1124 DPU_ERROR("crtc%d failed performance check %d\n",
1129 /* validate source split:
1130 * use pstates sorted by stage to check planes on same stage
1131 * we assume that all pipes are in source split so its valid to compare
1132 * without taking into account left/right mixer placement
1134 for (i = 1; i < cnt; i++) {
1135 struct plane_state *prv_pstate, *cur_pstate;
1136 struct drm_rect left_rect, right_rect;
1137 int32_t left_pid, right_pid;
1140 prv_pstate = &pstates[i - 1];
1141 cur_pstate = &pstates[i];
1142 if (prv_pstate->stage != cur_pstate->stage)
1145 stage = cur_pstate->stage;
1147 left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
1148 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
1150 right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
1151 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
1153 if (right_rect.x1 < left_rect.x1) {
1154 swap(left_pid, right_pid);
1155 swap(left_rect, right_rect);
1159 * - planes are enumerated in pipe-priority order such that
1160 * planes with lower drm_id must be left-most in a shared
1161 * blend-stage when using source split.
1162 * - planes in source split must be contiguous in width
1163 * - planes in source split must have same dest yoff and height
1165 if (right_pid < left_pid) {
1167 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
1168 stage, left_pid, right_pid);
1171 } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
1172 DPU_ERROR("non-contiguous coordinates for src split. "
1173 "stage: %d left: " DRM_RECT_FMT " right: "
1174 DRM_RECT_FMT "\n", stage,
1175 DRM_RECT_ARG(&left_rect),
1176 DRM_RECT_ARG(&right_rect));
1179 } else if (left_rect.y1 != right_rect.y1 ||
1180 drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
1181 DPU_ERROR("source split at stage: %d. invalid "
1182 "yoff/height: left: " DRM_RECT_FMT " right: "
1183 DRM_RECT_FMT "\n", stage,
1184 DRM_RECT_ARG(&left_rect),
1185 DRM_RECT_ARG(&right_rect));
1196 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1198 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1200 mutex_lock(&dpu_crtc->crtc_lock);
1201 trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1202 if (dpu_crtc->enabled) {
1203 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
1205 dpu_crtc->vblank_requested = en;
1206 mutex_unlock(&dpu_crtc->crtc_lock);
1211 #ifdef CONFIG_DEBUG_FS
/*
 * _dpu_debugfs_status_show - seq_file show callback for the per-CRTC
 * "status" debugfs node.
 * @s: seq_file whose ->private carries the struct dpu_crtc
 * @data: unused
 *
 * Dumps the current mode, per-mixer layout, per-plane/framebuffer state
 * and a vblank-rate estimate.  Takes all modeset locks plus crtc_lock
 * while walking atomic state.
 */
1212 static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
1214 struct dpu_crtc *dpu_crtc;
1215 struct dpu_plane_state *pstate = NULL;
1216 struct dpu_crtc_mixer *m;
1218 struct drm_crtc *crtc;
1219 struct drm_plane *plane;
1220 struct drm_display_mode *mode;
1221 struct drm_framebuffer *fb;
1222 struct drm_plane_state *state;
1223 struct dpu_crtc_state *cstate;
/* defensive: the seq_file must carry a valid dpu_crtc in ->private */
1227 if (!s || !s->private)
1230 dpu_crtc = s->private;
1231 crtc = &dpu_crtc->base;
/* hold modeset + crtc locks so the atomic state can't change under us */
1233 drm_modeset_lock_all(crtc->dev);
1234 cstate = to_dpu_crtc_state(crtc->state);
1236 mutex_lock(&dpu_crtc->crtc_lock);
1237 mode = &crtc->state->adjusted_mode;
1238 out_width = _dpu_crtc_get_mixer_width(cstate, mode);
1240 seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
1241 mode->hdisplay, mode->vdisplay);
/* per-mixer summary: LM/CTL indices and the width each mixer drives */
1245 for (i = 0; i < cstate->num_mixers; ++i) {
1246 m = &cstate->mixers[i];
1248 seq_printf(s, "\tmixer[%d] has no lm\n", i);
1249 else if (!m->lm_ctl)
1250 seq_printf(s, "\tmixer[%d] has no ctl\n", i);
1252 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
1253 m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
1254 out_width, mode->vdisplay);
/* per-plane dump: stage, framebuffer geometry, src/dst rects, multirect */
1259 drm_atomic_crtc_for_each_plane(plane, crtc) {
1260 pstate = to_dpu_plane_state(plane->state);
1261 state = plane->state;
1263 if (!pstate || !state)
1266 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
1269 if (plane->state->fb) {
1270 fb = plane->state->fb;
/* fourcc is printed as its 4 ASCII characters via %4.4s */
1272 seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
1273 fb->base.id, (char *) &fb->format->format,
1274 fb->width, fb->height);
1275 for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
1276 seq_printf(s, "cpp[%d]:%u ",
1277 i, fb->format->cpp[i]);
1278 seq_puts(s, "\n\t");
1280 seq_printf(s, "modifier:%8llu ", fb->modifier);
1284 for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
1285 seq_printf(s, "pitches[%d]:%8u ", i,
1290 for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
1291 seq_printf(s, "offsets[%d]:%8u ", i,
1296 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
1297 state->src_x, state->src_y, state->src_w, state->src_h);
1299 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
1300 state->crtc_x, state->crtc_y, state->crtc_w,
1302 seq_printf(s, "\tmultirect: mode: %d index: %d\n",
1303 pstate->multirect_mode, pstate->multirect_index);
/* vblank rate since the previous read of this file; guarded against
 * a zero elapsed time to avoid division by zero
 */
1307 if (dpu_crtc->vblank_cb_count) {
1308 ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
1309 s64 diff_ms = ktime_to_ms(diff);
1310 s64 fps = diff_ms ? div_s64(
1311 dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
1314 "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
1315 fps, dpu_crtc->vblank_cb_count,
1316 ktime_to_ms(diff), dpu_crtc->play_count);
1318 /* reset time & count for next measurement */
1319 dpu_crtc->vblank_cb_count = 0;
1320 dpu_crtc->vblank_cb_time = ktime_set(0, 0);
1323 seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
1325 mutex_unlock(&dpu_crtc->crtc_lock);
1326 drm_modeset_unlock_all(crtc->dev);
/*
 * _dpu_debugfs_status_open - open handler for the "status" debugfs node;
 * binds _dpu_debugfs_status_show to the seq_file with the dpu_crtc
 * (stashed in inode->i_private) as its private data.
 */
1331 static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
1333 return single_open(file, _dpu_debugfs_status_show, inode->i_private);
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - boilerplate generator: for a given
 * __prefix, emits a single_open()-based __prefix ## _open helper (which
 * binds __prefix ## _show) plus a matching const file_operations named
 * __prefix ## _fops wired to the seq_file read/lseek/release helpers.
 */
1336 #define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
1337 static int __prefix ## _open(struct inode *inode, struct file *file) \
1339 return single_open(file, __prefix ## _show, inode->i_private); \
1341 static const struct file_operations __prefix ## _fops = { \
1342 .owner = THIS_MODULE, \
1343 .open = __prefix ## _open, \
1344 .release = single_release, \
1346 .llseek = seq_lseek, \
/*
 * dpu_crtc_debugfs_state_show - seq_file show callback for the per-CRTC
 * "state" debugfs node.
 * @s: seq_file whose ->private carries the drm_crtc
 * @v: unused
 *
 * Prints client type, interface mode, and the cached core-perf figures
 * (core clock rate plus per-bus bandwidth vote and max per-pipe IB).
 */
1349 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1351 struct drm_crtc *crtc = (struct drm_crtc *) s->private;
1352 struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1355 seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1356 seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1357 seq_printf(s, "core_clk_rate: %llu\n",
1358 dpu_crtc->cur_perf.core_clk_rate);
/* one bandwidth-control / instantaneous-bandwidth pair per data bus */
1359 for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
1360 i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
1361 seq_printf(s, "bw_ctl[%d]: %llu\n", i,
1362 dpu_crtc->cur_perf.bw_ctl[i]);
1363 seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
1364 dpu_crtc->cur_perf.max_per_pipe_ib[i]);
/* generates dpu_crtc_debugfs_state_open and dpu_crtc_debugfs_state_fops */
1369 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
/*
 * _dpu_crtc_init_debugfs - create this CRTC's debugfs directory (named
 * after dpu_crtc->name) under the DRM primary debugfs root, and populate
 * it with the read-only "status" node and the "state" node.  File
 * creation failures are deliberately not checked ("don't error check
 * these"); only failure to create the directory itself is reported.
 */
1371 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1373 struct dpu_crtc *dpu_crtc;
1374 struct dpu_kms *dpu_kms;
/* local fops for "status"; "state" uses the macro-generated fops */
1376 static const struct file_operations debugfs_status_fops = {
1377 .open = _dpu_debugfs_status_open,
1379 .llseek = seq_lseek,
1380 .release = single_release,
1385 dpu_crtc = to_dpu_crtc(crtc);
1387 dpu_kms = _dpu_crtc_get_kms(crtc);
1389 dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
1390 crtc->dev->primary->debugfs_root);
1391 if (!dpu_crtc->debugfs_root)
1394 /* don't error check these */
1395 debugfs_create_file("status", 0400,
1396 dpu_crtc->debugfs_root,
1397 dpu_crtc, &debugfs_status_fops);
1398 debugfs_create_file("state", 0600,
1399 dpu_crtc->debugfs_root,
1401 &dpu_crtc_debugfs_state_fops);
/*
 * _dpu_crtc_destroy_debugfs - remove this CRTC's entire debugfs subtree;
 * recursive removal also deletes the "status" and "state" nodes.
 */
1406 static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
1408 struct dpu_crtc *dpu_crtc;
1412 dpu_crtc = to_dpu_crtc(crtc);
1413 debugfs_remove_recursive(dpu_crtc->debugfs_root);
/* !CONFIG_DEBUG_FS: the debugfs hooks compile down to no-op stubs */
1416 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1421 static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
1424 #endif /* CONFIG_DEBUG_FS */
/* drm_crtc_funcs.late_register: set up debugfs once the CRTC is registered */
1426 static int dpu_crtc_late_register(struct drm_crtc *crtc)
1428 return _dpu_crtc_init_debugfs(crtc);
/* drm_crtc_funcs.early_unregister: tear debugfs down before unregistering */
1431 static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
1433 _dpu_crtc_destroy_debugfs(crtc);
/*
 * CRTC vtable: atomic helpers for set_config/page_flip, DPU-specific
 * state lifecycle (reset/duplicate/destroy), and the debugfs
 * register/unregister hooks above.
 */
1436 static const struct drm_crtc_funcs dpu_crtc_funcs = {
1437 .set_config = drm_atomic_helper_set_config,
1438 .destroy = dpu_crtc_destroy,
1439 .page_flip = drm_atomic_helper_page_flip,
1440 .reset = dpu_crtc_reset,
1441 .atomic_duplicate_state = dpu_crtc_duplicate_state,
1442 .atomic_destroy_state = dpu_crtc_destroy_state,
1443 .late_register = dpu_crtc_late_register,
1444 .early_unregister = dpu_crtc_early_unregister,
/* Atomic-helper callbacks driving enable/disable, validation and commit */
1447 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
1448 .disable = dpu_crtc_disable,
1449 .atomic_enable = dpu_crtc_enable,
1450 .atomic_check = dpu_crtc_atomic_check,
1451 .atomic_begin = dpu_crtc_atomic_begin,
1452 .atomic_flush = dpu_crtc_atomic_flush,
1455 /* initialize crtc */
1456 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1457 struct drm_plane *cursor)
1459 struct drm_crtc *crtc = NULL;
1460 struct dpu_crtc *dpu_crtc = NULL;
1461 struct msm_drm_private *priv = NULL;
1462 struct dpu_kms *kms = NULL;
1465 priv = dev->dev_private;
1466 kms = to_dpu_kms(priv->kms);
1468 dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
1470 return ERR_PTR(-ENOMEM);
1472 crtc = &dpu_crtc->base;
1475 mutex_init(&dpu_crtc->crtc_lock);
1476 spin_lock_init(&dpu_crtc->spin_lock);
1477 atomic_set(&dpu_crtc->frame_pending, 0);
1479 init_completion(&dpu_crtc->frame_done_comp);
1481 INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1483 for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1484 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1485 list_add(&dpu_crtc->frame_events[i].list,
1486 &dpu_crtc->frame_event_list);
1487 kthread_init_work(&dpu_crtc->frame_events[i].work,
1488 dpu_crtc_frame_event_work);
1491 drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
1494 drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1497 /* save user friendly CRTC name for later */
1498 snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1500 /* initialize event handling */
1501 spin_lock_init(&dpu_crtc->event_lock);
1503 DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);