// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED    0
#define DPU_DRM_BLEND_OP_OPAQUE         1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
#define DPU_DRM_BLEND_OP_COVERAGE       3
#define DPU_DRM_BLEND_OP_MAX            4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

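/*
 * DRM supplies CTM coefficients as sign-magnitude S31.32 fixed point
 * (see struct drm_color_ctm); the PCC block takes an 18-bit value, so
 * drop the sign bit and truncate the 32 fractional bits down to 15.
 */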
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_frame_count(encoder);
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					   bool in_vblank_irq,
					   int *vpos, int *hpos,
					   ktime_t *stime, ktime_t *etime,
					   const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */

	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

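	/*
	 * Translate the hardware line count into a position relative to the
	 * start of active video: lines outside the active region come out
	 * negative, which the DRM vblank timestamping helpers read as
	 * "currently inside vertical blanking".
	 */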
	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
		  &format->base.pixel_format, format->alpha_enable, blend_op);
}

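/*
 * Program each layer mixer's output rectangle; with two mixers (source
 * split) the left and right halves of the CRTC each get their own ROI.
 */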
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

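		/*
		 * Up to two pipes can share one blend stage (e.g. for source
		 * split); zpos_cnt tracks how many are already staged there.
		 */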
		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events that were requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different
 * events from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * irq context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

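/*
 * Split the adjusted mode evenly across the active layer mixers: mixer i
 * covers columns [i * w/n, (i + 1) * w/n) at full display height.
 */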
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

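	/* drm_color_ctm is a row-major 3x3 matrix: matrix[3 * row + col] */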
	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 *                      required writes/flushing before crtc's "flush
	 *                      everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

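	/*
	 * frame_pending counts kickoffs that have not yet signalled
	 * frame-done; the bandwidth held for them is released in
	 * dpu_crtc_frame_event_work() once the count drops back to zero.
	 */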
	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	if (cstate)
		__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed; frame_pending:%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				     atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

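		/*
		 * A second plane on the same SSPP makes the pair a multirect
		 * candidate (one pipe fetching two rectangles); a lone plane
		 * waits in pipe_staged[] in case a partner shows up.
		 */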
		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR("r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR("multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR("invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
	.enable_vblank = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

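	/* pre-populate the free pool of events handed out by the frame cb */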
	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}