drm/msm/disp: Move various debug logs to atomic bucket
[linux-2.6-microblaze.git] drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
4  * Copyright (C) 2013 Red Hat
5  * Author: Rob Clark <robdclark@gmail.com>
6  */
7
8 #define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
9 #include <linux/sort.h>
10 #include <linux/debugfs.h>
11 #include <linux/ktime.h>
12 #include <linux/bits.h>
13
14 #include <drm/drm_atomic.h>
15 #include <drm/drm_crtc.h>
16 #include <drm/drm_flip_work.h>
17 #include <drm/drm_mode.h>
18 #include <drm/drm_probe_helper.h>
19 #include <drm/drm_rect.h>
20 #include <drm/drm_vblank.h>
21
22 #include "dpu_kms.h"
23 #include "dpu_hw_lm.h"
24 #include "dpu_hw_ctl.h"
25 #include "dpu_hw_dspp.h"
26 #include "dpu_crtc.h"
27 #include "dpu_plane.h"
28 #include "dpu_encoder.h"
29 #include "dpu_vbif.h"
30 #include "dpu_core_perf.h"
31 #include "dpu_trace.h"
32
33 #define DPU_DRM_BLEND_OP_NOT_DEFINED    0
34 #define DPU_DRM_BLEND_OP_OPAQUE         1
35 #define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
36 #define DPU_DRM_BLEND_OP_COVERAGE       3
37 #define DPU_DRM_BLEND_OP_MAX            4
38
39 /* layer mixer index on dpu_crtc */
40 #define LEFT_MIXER 0
41 #define RIGHT_MIXER 1
42
43 /* timeout in ms waiting for frame done */
44 #define DPU_CRTC_FRAME_DONE_TIMEOUT_MS  60
45
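   /*
    * DRM CTM coefficients are S31.32 sign-magnitude fixed point. As the
    * shift/mask below suggests, the PCC block takes an 18-bit magnitude:
    * drop the sign bit, then keep 3 integer and 15 fractional bits
    * (hence "S3_15").
    */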
46 #define CONVERT_S3_15(val) \
47         (((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
48
49 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
50 {
51         struct msm_drm_private *priv = crtc->dev->dev_private;
52
53         return to_dpu_kms(priv->kms);
54 }
55
56 static void dpu_crtc_destroy(struct drm_crtc *crtc)
57 {
58         struct dpu_crtc *dpu_crtc;
59
60         if (!crtc)
61                 return;
62
           dpu_crtc = to_dpu_crtc(crtc);
63         drm_crtc_cleanup(crtc);
64         kfree(dpu_crtc);
65 }
66
67 static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
68 {
69         struct drm_device *dev = crtc->dev;
70         struct drm_encoder *encoder;
71
72         drm_for_each_encoder(encoder, dev)
73                 if (encoder->crtc == crtc)
74                         return encoder;
75
76         return NULL;
77 }
78
79 static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
80 {
81         struct drm_encoder *encoder;
82
83         encoder = get_encoder_from_crtc(crtc);
84         if (!encoder) {
85                 DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
86                 return 0;
87         }
88
89         return dpu_encoder_get_frame_count(encoder);
90 }
91
92 static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
93                                            bool in_vblank_irq,
94                                            int *vpos, int *hpos,
95                                            ktime_t *stime, ktime_t *etime,
96                                            const struct drm_display_mode *mode)
97 {
98         unsigned int pipe = crtc->index;
99         struct drm_encoder *encoder;
100         int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
101
102         encoder = get_encoder_from_crtc(crtc);
103         if (!encoder) {
104                 DRM_ERROR("no encoder found for crtc %d\n", pipe);
105                 return false;
106         }
107
108         vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
109         vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
110
111         /*
112          * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
113          * the end of VFP. Translate the porch values relative to the line
114          * counter positions.
115          */
116
117         vactive_start = vsw + vbp + 1;
118         vactive_end = vactive_start + mode->crtc_vdisplay;
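            /*
             * e.g. with hypothetical timings vsw = 5, vbp = 36 and
             * vdisplay = 1080, active video spans counter values 42..1122.
             */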
119
120         /* last scan line before VSYNC */
121         vfp_end = mode->crtc_vtotal;
122
123         if (stime)
124                 *stime = ktime_get();
125
126         line = dpu_encoder_get_linecount(encoder);
127
128         if (line < vactive_start)
129                 line -= vactive_start;
130         else if (line > vactive_end)
131                 line = line - vfp_end - vactive_start;
132         else
133                 line -= vactive_start;
134
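            /* per DRM convention, negative vpos means the position is inside vblank */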
135         *vpos = line;
136         *hpos = 0;
137
138         if (etime)
139                 *etime = ktime_get();
140
141         return true;
142 }
143
144 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
145                 struct dpu_plane_state *pstate, struct dpu_format *format)
146 {
147         struct dpu_hw_mixer *lm = mixer->hw_lm;
148         uint32_t blend_op;
149
150         /* default to opaque blending */
151         blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
152                 DPU_BLEND_BG_ALPHA_BG_CONST;
153
154         if (format->alpha_enable) {
155                 /* coverage blending */
156                 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
157                         DPU_BLEND_BG_ALPHA_FG_PIXEL |
158                         DPU_BLEND_BG_INV_ALPHA;
159         }
160
161         lm->ops.setup_blend_config(lm, pstate->stage,
162                                 0xFF, 0, blend_op);
163
164         DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
165                   &format->base.pixel_format, format->alpha_enable, blend_op);
166 }
167
168 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
169 {
170         struct dpu_crtc_state *crtc_state;
171         int lm_idx, lm_horiz_position;
172
173         crtc_state = to_dpu_crtc_state(crtc->state);
174
175         lm_horiz_position = 0;
176         for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
177                 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
178                 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
179                 struct dpu_hw_mixer_cfg cfg;
180
181                 if (!lm_roi || !drm_rect_visible(lm_roi))
182                         continue;
183
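                    /*
                     * Visible mixers are programmed left to right: the first
                     * gets right_mixer = 0, the next 1, and so on.
                     */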
184                 cfg.out_width = drm_rect_width(lm_roi);
185                 cfg.out_height = drm_rect_height(lm_roi);
186                 cfg.right_mixer = lm_horiz_position++;
187                 cfg.flags = 0;
188                 hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
189         }
190 }
191
192 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
193         struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
194 {
195         struct drm_plane *plane;
196         struct drm_framebuffer *fb;
197         struct drm_plane_state *state;
198         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
199         struct dpu_plane_state *pstate = NULL;
200         struct dpu_format *format;
201         struct dpu_hw_ctl *ctl = mixer->lm_ctl;
202         struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
203
204         u32 flush_mask;
205         uint32_t stage_idx, lm_idx;
206         int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
207         bool bg_alpha_enable = false;
208         DECLARE_BITMAP(fetch_active, SSPP_MAX);
209
210         memset(fetch_active, 0, sizeof(fetch_active));
211         drm_atomic_crtc_for_each_plane(plane, crtc) {
212                 state = plane->state;
213                 if (!state)
214                         continue;
215
216                 pstate = to_dpu_plane_state(state);
217                 fb = state->fb;
218
219                 dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
220                 set_bit(dpu_plane_pipe(plane), fetch_active);
221
222                 DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
223                                 crtc->base.id,
224                                 pstate->stage,
225                                 plane->base.id,
226                                 dpu_plane_pipe(plane) - SSPP_VIG0,
227                                 state->fb ? state->fb->base.id : -1);
228
229                 format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
230
231                 if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
232                         bg_alpha_enable = true;
233
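                    /*
                     * More than one plane can occupy a blend stage (e.g. an
                     * SSPP multirect pair); zpos_cnt picks the slot used
                     * within that stage.
                     */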
234                 stage_idx = zpos_cnt[pstate->stage]++;
235                 stage_cfg->stage[pstate->stage][stage_idx] =
236                                         dpu_plane_pipe(plane);
237                 stage_cfg->multirect_index[pstate->stage][stage_idx] =
238                                         pstate->multirect_index;
239
240                 trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
241                                            state, pstate, stage_idx,
242                                            dpu_plane_pipe(plane) - SSPP_VIG0,
243                                            format->base.pixel_format,
244                                            fb ? fb->modifier : 0);
245
246                 /* blend config update */
247                 for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
248                         _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
249                                                 pstate, format);
250
251                         mixer[lm_idx].flush_mask |= flush_mask;
252
253                         if (bg_alpha_enable && !format->alpha_enable)
254                                 mixer[lm_idx].mixer_op_mode = 0;
255                         else
256                                 mixer[lm_idx].mixer_op_mode |=
257                                                 1 << pstate->stage;
258                 }
259         }
260
261         if (ctl->ops.set_active_pipes)
262                 ctl->ops.set_active_pipes(ctl, fetch_active);
263
264         _dpu_crtc_program_lm_output_roi(crtc);
265 }
266
267 /**
268  * _dpu_crtc_blend_setup - configure crtc mixers
269  * @crtc: Pointer to drm crtc structure
270  */
271 static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
272 {
273         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
274         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
275         struct dpu_crtc_mixer *mixer = cstate->mixers;
276         struct dpu_hw_ctl *ctl;
277         struct dpu_hw_mixer *lm;
278         int i;
279
280         DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
281
282         for (i = 0; i < cstate->num_mixers; i++) {
283                 mixer[i].mixer_op_mode = 0;
284                 mixer[i].flush_mask = 0;
285                 if (mixer[i].lm_ctl->ops.clear_all_blendstages)
286                         mixer[i].lm_ctl->ops.clear_all_blendstages(
287                                         mixer[i].lm_ctl);
288         }
289
290         /* initialize stage cfg */
291         memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
292
293         _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
294
295         for (i = 0; i < cstate->num_mixers; i++) {
296                 ctl = mixer[i].lm_ctl;
297                 lm = mixer[i].hw_lm;
298
299                 lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
300
301                 mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
302                         mixer[i].hw_lm->idx);
303
304                 /* stage config flush mask */
305                 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
306
307                 DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
308                         mixer[i].hw_lm->idx - LM_0,
309                         mixer[i].mixer_op_mode,
310                         ctl->idx - CTL_0,
311                         mixer[i].flush_mask);
312
313                 ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
314                         &dpu_crtc->stage_cfg);
315         }
316 }
317
318 /**
319  * _dpu_crtc_complete_flip - signal pending page_flip events
320  * Any pending vblank events are added to the vblank_event_list
321  * so that the next vblank interrupt signals them.
322  * However, PAGE_FLIP events are not handled through the vblank_event_list.
323  * This API signals any pending PAGE_FLIP events requested through
324  * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
325  * @crtc: Pointer to drm crtc structure
326  */
327 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
328 {
329         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
330         struct drm_device *dev = crtc->dev;
331         unsigned long flags;
332
333         spin_lock_irqsave(&dev->event_lock, flags);
334         if (dpu_crtc->event) {
335                 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
336                               dpu_crtc->event);
337                 trace_dpu_crtc_complete_flip(DRMID(crtc));
338                 drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
339                 dpu_crtc->event = NULL;
340         }
341         spin_unlock_irqrestore(&dev->event_lock, flags);
342 }
343
344 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
345 {
346         struct drm_encoder *encoder;
347
348         /*
349          * TODO: This function is called from dpu debugfs and as part of atomic
350          * check. When called from debugfs, the crtc->mutex must be held to
351          * read crtc->state. However reading crtc->state from atomic check isn't
352          * allowed (unless you have a good reason, a big comment, and a deep
353          * understanding of how the atomic/modeset locks work (<- and this is
354          * probably not possible)). So we'll keep the WARN_ON here for now, but
355          * really we need to figure out a better way to track our operating mode
356          */
357         WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
358
359         /* TODO: Returns the first INTF_MODE, could there be multiple values? */
360         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
361                 return dpu_encoder_get_intf_mode(encoder);
362
363         return INTF_MODE_NONE;
364 }
365
366 void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
367 {
368         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
369
370         /* keep statistics on vblank callback - with auto reset via debugfs */
371         if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
372                 dpu_crtc->vblank_cb_time = ktime_get();
373         else
374                 dpu_crtc->vblank_cb_count++;
375         drm_crtc_handle_vblank(crtc);
376         trace_dpu_crtc_vblank_cb(DRMID(crtc));
377 }
378
379 static void dpu_crtc_frame_event_work(struct kthread_work *work)
380 {
381         struct dpu_crtc_frame_event *fevent = container_of(work,
382                         struct dpu_crtc_frame_event, work);
383         struct drm_crtc *crtc = fevent->crtc;
384         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
385         unsigned long flags;
386         bool frame_done = false;
387
388         DPU_ATRACE_BEGIN("crtc_frame_event");
389
390         DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
391                         ktime_to_ns(fevent->ts));
392
393         if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
394                                 | DPU_ENCODER_FRAME_EVENT_ERROR
395                                 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
396
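                    /*
                     * frame_pending is incremented once per kickoff in
                     * dpu_crtc_commit_kickoff(); the final decrement here
                     * means the last outstanding frame has finished.
                     */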
397                 if (atomic_read(&dpu_crtc->frame_pending) < 1) {
398                         /* ignore frame events when no frame is pending */
399                 } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
400                         /* release bandwidth and other resources */
401                         trace_dpu_crtc_frame_event_done(DRMID(crtc),
402                                                         fevent->event);
403                         dpu_core_perf_crtc_release_bw(crtc);
404                 } else {
405                         trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
406                                                                 fevent->event);
407                 }
408
409                 if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
410                         dpu_core_perf_crtc_update(crtc, 0, false);
411
412                 if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
413                                         | DPU_ENCODER_FRAME_EVENT_ERROR))
414                         frame_done = true;
415         }
416
417         if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
418                 DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
419                                 crtc->base.id, ktime_to_ns(fevent->ts));
420
421         if (frame_done)
422                 complete_all(&dpu_crtc->frame_done_comp);
423
424         spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
425         list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
426         spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
427         DPU_ATRACE_END("crtc_frame_event");
428 }
429
430 /*
431  * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
432  * registers this API to encoder for all frame event callbacks like
433  * frame_error, frame_done, idle_timeout, etc. The encoder may signal these
434  * events from different contexts - IRQ, user thread, commit_thread, etc.
435  * Each event should be carefully reviewed and processed in the proper task
436  * context to avoid scheduling delays, or the irq context's bottom-half
437  * processing must be properly managed.
438  */
439 static void dpu_crtc_frame_event_cb(void *data, u32 event)
440 {
441         struct drm_crtc *crtc = (struct drm_crtc *)data;
442         struct dpu_crtc *dpu_crtc;
443         struct msm_drm_private *priv;
444         struct dpu_crtc_frame_event *fevent;
445         unsigned long flags;
446         u32 crtc_id;
447
448         /* Nothing to do on idle event */
449         if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
450                 return;
451
452         dpu_crtc = to_dpu_crtc(crtc);
453         priv = crtc->dev->dev_private;
454         crtc_id = drm_crtc_index(crtc);
455
456         trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
457
458         spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
459         fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
460                         struct dpu_crtc_frame_event, list);
461         if (fevent)
462                 list_del_init(&fevent->list);
463         spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
464
465         if (!fevent) {
466                 DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
467                 return;
468         }
469
470         fevent->event = event;
471         fevent->crtc = crtc;
472         fevent->ts = ktime_get();
473         kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
474 }
475
476 void dpu_crtc_complete_commit(struct drm_crtc *crtc)
477 {
478         trace_dpu_crtc_complete_commit(DRMID(crtc));
479         _dpu_crtc_complete_flip(crtc);
480 }
481
482 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
483                 struct drm_crtc_state *state)
484 {
485         struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
486         struct drm_display_mode *adj_mode = &state->adjusted_mode;
487         u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
488         int i;
489
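            /* the mode is split evenly: each LM drives one vertical strip */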
490         for (i = 0; i < cstate->num_mixers; i++) {
491                 struct drm_rect *r = &cstate->lm_bounds[i];
492                 r->x1 = crtc_split_width * i;
493                 r->y1 = 0;
494                 r->x2 = r->x1 + crtc_split_width;
495                 r->y2 = adj_mode->vdisplay;
496
497                 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
498         }
499 }
500
501 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
502                 struct dpu_hw_pcc_cfg *cfg)
503 {
504         struct drm_color_ctm *ctm;
505
506         memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
507
508         ctm = (struct drm_color_ctm *)state->ctm->data;
509
510         if (!ctm)
511                 return;
512
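            /*
             * The DRM CTM is row-major: matrix[0..2] weight the red output,
             * matrix[3..5] the green and matrix[6..8] the blue.
             */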
513         cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
514         cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
515         cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
516
517         cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
518         cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
519         cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
520
521         cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
522         cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
523         cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
524 }
525
526 static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
527 {
528         struct drm_crtc_state *state = crtc->state;
529         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
530         struct dpu_crtc_mixer *mixer = cstate->mixers;
531         struct dpu_hw_pcc_cfg cfg;
532         struct dpu_hw_ctl *ctl;
533         struct dpu_hw_dspp *dspp;
534         int i;
535
537         if (!state->color_mgmt_changed)
538                 return;
539
540         for (i = 0; i < cstate->num_mixers; i++) {
541                 ctl = mixer[i].lm_ctl;
542                 dspp = mixer[i].hw_dspp;
543
544                 if (!dspp || !dspp->ops.setup_pcc)
545                         continue;
546
547                 if (!state->ctm) {
548                         dspp->ops.setup_pcc(dspp, NULL);
549                 } else {
550                         _dpu_crtc_get_pcc_coeff(state, &cfg);
551                         dspp->ops.setup_pcc(dspp, &cfg);
552                 }
553
554                 mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
555                         mixer[i].hw_dspp->idx);
556
557                 /* stage config flush mask */
558                 ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
559
560                 DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
561                         mixer[i].hw_lm->idx - LM_0,
562                         ctl->idx - CTL_0,
563                         mixer[i].flush_mask);
564         }
565 }
566
567 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
568                 struct drm_atomic_state *state)
569 {
570         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
571         struct drm_encoder *encoder;
572
573         if (!crtc->state->enable) {
574                 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
575                                 crtc->base.id, crtc->state->enable);
576                 return;
577         }
578
579         DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
580
581         _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
582
583         /* encoder will trigger pending mask now */
584         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
585                 dpu_encoder_trigger_kickoff_pending(encoder);
586
587         /*
588          * If no mixers have been allocated in dpu_crtc_atomic_check(),
589          * it means we are trying to flush a CRTC whose state is disabled:
590          * nothing else needs to be done.
591          */
592         if (unlikely(!cstate->num_mixers))
593                 return;
594
595         _dpu_crtc_blend_setup(crtc);
596
597         _dpu_crtc_setup_cp_blocks(crtc);
598
599         /*
600          * PP_DONE irq is only used by command mode for now.
601          * It is better to request pending before FLUSH and START trigger
602          * to make sure no pp_done irq missed.
603          * This is safe because no pp_done will happen before SW trigger
604          * in command mode.
605          */
606 }
607
608 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
609                 struct drm_atomic_state *state)
610 {
611         struct dpu_crtc *dpu_crtc;
612         struct drm_device *dev;
613         struct drm_plane *plane;
614         struct msm_drm_private *priv;
615         unsigned long flags;
616         struct dpu_crtc_state *cstate;
617
618         if (!crtc->state->enable) {
619                 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
620                                 crtc->base.id, crtc->state->enable);
621                 return;
622         }
623
624         DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
625
626         dpu_crtc = to_dpu_crtc(crtc);
627         cstate = to_dpu_crtc_state(crtc->state);
628         dev = crtc->dev;
629         priv = dev->dev_private;
630
631         if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
632                 DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
633                 return;
634         }
635
636         WARN_ON(dpu_crtc->event);
637         spin_lock_irqsave(&dev->event_lock, flags);
638         dpu_crtc->event = crtc->state->event;
639         crtc->state->event = NULL;
640         spin_unlock_irqrestore(&dev->event_lock, flags);
641
642         /*
643          * If no mixers have been allocated in dpu_crtc_atomic_check(),
644          * it means we are trying to flush a CRTC whose state is disabled:
645          * nothing else needs to be done.
646          */
647         if (unlikely(!cstate->num_mixers))
648                 return;
649
650         /* update performance setting before crtc kickoff */
651         dpu_core_perf_crtc_update(crtc, 1, false);
652
653         /*
654          * Final plane updates: Give each plane a chance to complete all
655          *                      required writes/flushing before crtc's "flush
656          *                      everything" call below.
657          */
658         drm_atomic_crtc_for_each_plane(plane, crtc) {
659                 if (dpu_crtc->smmu_state.transition_error)
660                         dpu_plane_set_error(plane, true);
661                 dpu_plane_flush(plane);
662         }
663
664         /* Kickoff will be scheduled by outer layer */
665 }
666
667 /**
668  * dpu_crtc_destroy_state - state destroy hook
669  * @crtc: drm CRTC
670  * @state: CRTC state object to release
671  */
672 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
673                 struct drm_crtc_state *state)
674 {
675         struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
676
677         DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
678
679         __drm_atomic_helper_crtc_destroy_state(state);
680
681         kfree(cstate);
682 }
683
684 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
685 {
686         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
687         int ret, rc = 0;
688
689         if (!atomic_read(&dpu_crtc->frame_pending)) {
690                 DRM_DEBUG_ATOMIC("no frames pending\n");
691                 return 0;
692         }
693
694         DPU_ATRACE_BEGIN("frame done completion wait");
695         ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
696                         msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
697         if (!ret) {
698                 DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
699                 rc = -ETIMEDOUT;
700         }
701         DPU_ATRACE_END("frame done completion wait");
702
703         return rc;
704 }
705
706 void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
707 {
708         struct drm_encoder *encoder;
709         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
710         struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
711         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
712
713         /*
714          * If no mixers have been allocated in dpu_crtc_atomic_check(),
715          * it means we are trying to start a CRTC whose state is disabled:
716          * nothing else needs to be done.
717          */
718         if (unlikely(!cstate->num_mixers))
719                 return;
720
721         DPU_ATRACE_BEGIN("crtc_commit");
722
723         /*
724          * Encoder will flush/start now, unless it has a tx pending. If so, it
725          * may delay and flush at an irq event (e.g. ppdone)
726          */
727         drm_for_each_encoder_mask(encoder, crtc->dev,
728                                   crtc->state->encoder_mask)
729                 dpu_encoder_prepare_for_kickoff(encoder);
730
731         if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
732                 /* acquire bandwidth and other resources */
733                 DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
734         } else {
735                 DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);
            }
736
737         dpu_crtc->play_count++;
738
739         dpu_vbif_clear_errors(dpu_kms);
740
741         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
742                 dpu_encoder_kickoff(encoder);
743
744         reinit_completion(&dpu_crtc->frame_done_comp);
745         DPU_ATRACE_END("crtc_commit");
746 }
747
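   /*
    * dpu_crtc_reset - reset hook, called e.g. at driver load: frees any
    * previous atomic state and installs a freshly zeroed dpu_crtc_state.
    */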
748 static void dpu_crtc_reset(struct drm_crtc *crtc)
749 {
750         struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
751
752         if (crtc->state)
753                 dpu_crtc_destroy_state(crtc, crtc->state);
754
755         __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
756 }
757
758 /**
759  * dpu_crtc_duplicate_state - state duplicate hook
760  * @crtc: Pointer to drm crtc structure
761  */
762 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
763 {
764         struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
765
766         cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
767         if (!cstate) {
768                 DPU_ERROR("failed to allocate state\n");
769                 return NULL;
770         }
771
772         /* duplicate base helper */
773         __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
774
775         return &cstate->base;
776 }
777
778 static void dpu_crtc_disable(struct drm_crtc *crtc,
779                              struct drm_atomic_state *state)
780 {
781         struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
782                                                                               crtc);
783         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
784         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
785         struct drm_encoder *encoder;
786         unsigned long flags;
787         bool release_bandwidth = false;
788
789         DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
790
791         /* Disable/save vblank irq handling */
792         drm_crtc_vblank_off(crtc);
793
794         drm_for_each_encoder_mask(encoder, crtc->dev,
795                                   old_crtc_state->encoder_mask) {
796                 /* in video mode, we hold an extra bandwidth reference
797                  * as we cannot drop bandwidth at frame-done if any
798                  * crtc is being used in video mode.
799                  */
800                 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
801                         release_bandwidth = true;
802                 dpu_encoder_assign_crtc(encoder, NULL);
803         }
804
805         /* wait for frame_event_done completion */
806         if (_dpu_crtc_wait_for_frame_done(crtc))
807                 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
808                                 crtc->base.id,
809                                 atomic_read(&dpu_crtc->frame_pending));
810
811         trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
812         dpu_crtc->enabled = false;
813
814         if (atomic_read(&dpu_crtc->frame_pending)) {
815                 trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
816                                      atomic_read(&dpu_crtc->frame_pending));
817                 if (release_bandwidth)
818                         dpu_core_perf_crtc_release_bw(crtc);
819                 atomic_set(&dpu_crtc->frame_pending, 0);
820         }
821
822         dpu_core_perf_crtc_update(crtc, 0, true);
823
824         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
825                 dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
826
827         memset(cstate->mixers, 0, sizeof(cstate->mixers));
828         cstate->num_mixers = 0;
829
830         /* disable clk & bw control until clk & bw properties are set */
831         cstate->bw_control = false;
832         cstate->bw_split_vote = false;
833
834         if (crtc->state->event && !crtc->state->active) {
835                 spin_lock_irqsave(&crtc->dev->event_lock, flags);
836                 drm_crtc_send_vblank_event(crtc, crtc->state->event);
837                 crtc->state->event = NULL;
838                 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
839         }
840
841         pm_runtime_put_sync(crtc->dev->dev);
842 }
843
844 static void dpu_crtc_enable(struct drm_crtc *crtc,
845                 struct drm_atomic_state *state)
846 {
847         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
848         struct drm_encoder *encoder;
849         bool request_bandwidth = false;
850
851         pm_runtime_get_sync(crtc->dev->dev);
852
853         DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
854
855         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
856                 /* in video mode, we hold an extra bandwidth reference
857                  * as we cannot drop bandwidth at frame-done if any
858                  * crtc is being used in video mode.
859                  */
860                 if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
861                         request_bandwidth = true;
862                 dpu_encoder_register_frame_event_callback(encoder,
863                                 dpu_crtc_frame_event_cb, (void *)crtc);
864         }
865
866         if (request_bandwidth)
867                 atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
868
869         trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
870         dpu_crtc->enabled = true;
871
872         drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
873                 dpu_encoder_assign_crtc(encoder, crtc);
874
875         /* Enable/restore vblank irq handling */
876         drm_crtc_vblank_on(crtc);
877 }
878
879 struct plane_state {
880         struct dpu_plane_state *dpu_pstate;
881         const struct drm_plane_state *drm_pstate;
882         int stage;
883         u32 pipe_id;
884 };
885
886 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
887                 struct drm_atomic_state *state)
888 {
889         struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
890                                                                           crtc);
891         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
892         struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
893         struct plane_state *pstates;
894
895         const struct drm_plane_state *pstate;
896         struct drm_plane *plane;
897         struct drm_display_mode *mode;
898
899         int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
900
901         struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
902         int multirect_count = 0;
903         const struct drm_plane_state *pipe_staged[SSPP_MAX];
904         int left_zpos_cnt = 0, right_zpos_cnt = 0;
905         struct drm_rect crtc_rect = { 0 };
906
907         pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
            if (!pstates)
                    return -ENOMEM;
908
909         if (!crtc_state->enable || !crtc_state->active) {
910                 DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
911                                 crtc->base.id, crtc_state->enable,
912                                 crtc_state->active);
913                 memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
914                 goto end;
915         }
916
917         mode = &crtc_state->adjusted_mode;
918         DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);
919
920         /* force a full mode set if active state changed */
921         if (crtc_state->active_changed)
922                 crtc_state->mode_changed = true;
923
924         memset(pipe_staged, 0, sizeof(pipe_staged));
925
926         if (cstate->num_mixers) {
927                 mixer_width = mode->hdisplay / cstate->num_mixers;
928
929                 _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
930         }
931
932         crtc_rect.x2 = mode->hdisplay;
933         crtc_rect.y2 = mode->vdisplay;
934
935         /* get plane state for all drm planes associated with crtc state */
936         drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
937                 struct drm_rect dst, clip = crtc_rect;
938
939                 if (IS_ERR_OR_NULL(pstate)) {
940                         rc = PTR_ERR(pstate);
941                         DPU_ERROR("%s: failed to get plane%d state, %d\n",
942                                         dpu_crtc->name, plane->base.id, rc);
943                         goto end;
944                 }
945                 if (cnt >= DPU_STAGE_MAX * 4)
946                         continue;
947
948                 pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
949                 pstates[cnt].drm_pstate = pstate;
950                 pstates[cnt].stage = pstate->normalized_zpos;
951                 pstates[cnt].pipe_id = dpu_plane_pipe(plane);
952
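                    /*
                     * A second plane staged on the same SSPP forms a
                     * multirect pair: the earlier state becomes r0, the
                     * current one r1.
                     */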
953                 if (pipe_staged[pstates[cnt].pipe_id]) {
954                         multirect_plane[multirect_count].r0 =
955                                 pipe_staged[pstates[cnt].pipe_id];
956                         multirect_plane[multirect_count].r1 = pstate;
957                         multirect_count++;
958
959                         pipe_staged[pstates[cnt].pipe_id] = NULL;
960                 } else {
961                         pipe_staged[pstates[cnt].pipe_id] = pstate;
962                 }
963
964                 cnt++;
965
966                 dst = drm_plane_state_dest(pstate);
967                 if (!drm_rect_intersect(&clip, &dst)) {
968                         DPU_ERROR("invalid vertical/horizontal destination\n");
969                         DPU_ERROR("display: " DRM_RECT_FMT " plane: "
970                                   DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
971                                   DRM_RECT_ARG(&dst));
972                         rc = -E2BIG;
973                         goto end;
974                 }
975         }
976
977         for (i = 1; i < SSPP_MAX; i++) {
978                 if (pipe_staged[i]) {
979                         dpu_plane_clear_multirect(pipe_staged[i]);
980
981                         if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
982                                 DPU_ERROR(
983                                         "r1 only virt plane:%d not supported\n",
984                                         pipe_staged[i]->plane->base.id);
985                                 rc = -EINVAL;
986                                 goto end;
987                         }
988                 }
989         }
990
991         z_pos = -1;
992         for (i = 0; i < cnt; i++) {
993                 /* reset counts at every new blend stage */
994                 if (pstates[i].stage != z_pos) {
995                         left_zpos_cnt = 0;
996                         right_zpos_cnt = 0;
997                         z_pos = pstates[i].stage;
998                 }
999
1000                 /* verify z_pos setting before using it */
1001                 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
1002                         DPU_ERROR("> %d plane stages assigned\n",
1003                                         DPU_STAGE_MAX - DPU_STAGE_0);
1004                         rc = -EINVAL;
1005                         goto end;
1006                 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
1007                         if (left_zpos_cnt == 2) {
1008                                 DPU_ERROR("> 2 planes @ stage %d on left\n",
1009                                         z_pos);
1010                                 rc = -EINVAL;
1011                                 goto end;
1012                         }
1013                         left_zpos_cnt++;
1014
1015                 } else {
1016                         if (right_zpos_cnt == 2) {
1017                                 DPU_ERROR("> 2 planes @ stage %d on right\n",
1018                                         z_pos);
1019                                 rc = -EINVAL;
1020                                 goto end;
1021                         }
1022                         right_zpos_cnt++;
1023                 }
1024
1025                 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
1026                 DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
1027         }
1028
1029         for (i = 0; i < multirect_count; i++) {
1030                 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
1031                         DPU_ERROR(
1032                         "multirect validation failed for planes (%d - %d)\n",
1033                                         multirect_plane[i].r0->plane->base.id,
1034                                         multirect_plane[i].r1->plane->base.id);
1035                         rc = -EINVAL;
1036                         goto end;
1037                 }
1038         }
1039
1040         atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
1041
1042         rc = dpu_core_perf_crtc_check(crtc, crtc_state);
1043         if (rc) {
1044                 DPU_ERROR("crtc%d failed performance check %d\n",
1045                                 crtc->base.id, rc);
1046                 goto end;
1047         }
1048
1049         /* validate source split:
1050          * use pstates sorted by stage to check planes on same stage
1051          * we assume that all pipes are in source split so it's valid to compare
1052          * without taking into account left/right mixer placement
1053          */
1054         for (i = 1; i < cnt; i++) {
1055                 struct plane_state *prv_pstate, *cur_pstate;
1056                 struct drm_rect left_rect, right_rect;
1057                 int32_t left_pid, right_pid;
1058                 int32_t stage;
1059
1060                 prv_pstate = &pstates[i - 1];
1061                 cur_pstate = &pstates[i];
1062                 if (prv_pstate->stage != cur_pstate->stage)
1063                         continue;
1064
1065                 stage = cur_pstate->stage;
1066
1067                 left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
1068                 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
1069
1070                 right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
1071                 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
1072
1073                 if (right_rect.x1 < left_rect.x1) {
1074                         swap(left_pid, right_pid);
1075                         swap(left_rect, right_rect);
1076                 }
1077
1078                 /*
1079                  * - planes are enumerated in pipe-priority order such that
1080                  *   planes with lower drm_id must be left-most in a shared
1081                  *   blend-stage when using source split.
1082                  * - planes in source split must be contiguous in width
1083                  * - planes in source split must have same dest yoff and height
1084                  */
1085                 if (right_pid < left_pid) {
1086                         DPU_ERROR(
1087                                 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
1088                                 stage, left_pid, right_pid);
1089                         rc = -EINVAL;
1090                         goto end;
1091                 } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
1092                         DPU_ERROR("non-contiguous coordinates for src split. "
1093                                   "stage: %d left: " DRM_RECT_FMT " right: "
1094                                   DRM_RECT_FMT "\n", stage,
1095                                   DRM_RECT_ARG(&left_rect),
1096                                   DRM_RECT_ARG(&right_rect));
1097                         rc = -EINVAL;
1098                         goto end;
1099                 } else if (left_rect.y1 != right_rect.y1 ||
1100                            drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
1101                         DPU_ERROR("source split at stage: %d. invalid "
1102                                   "yoff/height: left: " DRM_RECT_FMT " right: "
1103                                   DRM_RECT_FMT "\n", stage,
1104                                   DRM_RECT_ARG(&left_rect),
1105                                   DRM_RECT_ARG(&right_rect));
1106                         rc = -EINVAL;
1107                         goto end;
1108                 }
1109         }
1110
1111 end:
1112         kfree(pstates);
1113         return rc;
1114 }
1115
1116 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1117 {
1118         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1119         struct drm_encoder *enc;
1120
1121         trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1122
1123         /*
1124          * Normally we would iterate through encoder_mask in crtc state to find
1125          * attached encoders. In this case, we might be disabling vblank _after_
1126          * encoder_mask has been cleared.
1127          *
1128          * Instead, we "assign" a crtc to the encoder in enable and clear it in
1129          * disable (which is also after encoder_mask is cleared). So instead of
1130          * using encoder mask, we'll ask the encoder to toggle itself iff it's
1131          * currently assigned to our crtc.
1132          *
1133          * Note also that this function cannot be called while crtc is disabled
1134          * since we use drm_crtc_vblank_on/off. So we don't need to worry
1135          * about the assigned crtcs being inconsistent with the current state
1136          * (which means no need to worry about modeset locks).
1137          */
1138         list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
1139                 trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
1140                                              dpu_crtc);
1141
1142                 dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
1143         }
1144
1145         return 0;
1146 }
1147
1148 #ifdef CONFIG_DEBUG_FS
1149 static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
1150 {
1151         struct dpu_crtc *dpu_crtc;
1152         struct dpu_plane_state *pstate = NULL;
1153         struct dpu_crtc_mixer *m;
1154
1155         struct drm_crtc *crtc;
1156         struct drm_plane *plane;
1157         struct drm_display_mode *mode;
1158         struct drm_framebuffer *fb;
1159         struct drm_plane_state *state;
1160         struct dpu_crtc_state *cstate;
1161
1162         int i, out_width;
1163
1164         dpu_crtc = s->private;
1165         crtc = &dpu_crtc->base;
1166
1167         drm_modeset_lock_all(crtc->dev);
1168         cstate = to_dpu_crtc_state(crtc->state);
1169
1170         mode = &crtc->state->adjusted_mode;
1171         out_width = mode->hdisplay / cstate->num_mixers;
1172
1173         seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
1174                                 mode->hdisplay, mode->vdisplay);
1175
1176         seq_puts(s, "\n");
1177
1178         for (i = 0; i < cstate->num_mixers; ++i) {
1179                 m = &cstate->mixers[i];
1180                 seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
1181                         m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
1182                         out_width, mode->vdisplay);
1183         }
1184
1185         seq_puts(s, "\n");
1186
1187         drm_atomic_crtc_for_each_plane(plane, crtc) {
1188                 pstate = to_dpu_plane_state(plane->state);
1189                 state = plane->state;
1190
1191                 if (!pstate || !state)
1192                         continue;
1193
1194                 seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
1195                         pstate->stage);
1196
1197                 if (plane->state->fb) {
1198                         fb = plane->state->fb;
1199
1200                         seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
1201                                 fb->base.id, (char *) &fb->format->format,
1202                                 fb->width, fb->height);
1203                         for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
1204                                 seq_printf(s, "cpp[%d]:%u ",
1205                                                 i, fb->format->cpp[i]);
1206                         seq_puts(s, "\n\t");
1207
1208                         seq_printf(s, "modifier:%8llu ", fb->modifier);
1209                         seq_puts(s, "\n");
1210
1211                         seq_puts(s, "\t");
1212                         for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
1213                                 seq_printf(s, "pitches[%d]:%8u ", i,
1214                                                         fb->pitches[i]);
1215                         seq_puts(s, "\n");
1216
1217                         seq_puts(s, "\t");
1218                         for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
1219                                 seq_printf(s, "offsets[%d]:%8u ", i,
1220                                                         fb->offsets[i]);
1221                         seq_puts(s, "\n");
1222                 }
1223
1224                 seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
1225                         state->src_x, state->src_y, state->src_w, state->src_h);
1226
1227                 seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
1228                         state->crtc_x, state->crtc_y, state->crtc_w,
1229                         state->crtc_h);
1230                 seq_printf(s, "\tmultirect: mode: %d index: %d\n",
1231                         pstate->multirect_mode, pstate->multirect_index);
1232
1233                 seq_puts(s, "\n");
1234         }
1235         if (dpu_crtc->vblank_cb_count) {
1236                 ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
1237                 s64 diff_ms = ktime_to_ms(diff);
1238                 s64 fps = diff_ms ? div_s64(
1239                                 dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
1240
1241                 seq_printf(s,
1242                         "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
1243                                 fps, dpu_crtc->vblank_cb_count,
1244                                 ktime_to_ms(diff), dpu_crtc->play_count);
1245
1246                 /* reset time & count for next measurement */
1247                 dpu_crtc->vblank_cb_count = 0;
1248                 dpu_crtc->vblank_cb_time = ktime_set(0, 0);
1249         }
1250
1251         drm_modeset_unlock_all(crtc->dev);
1252
1253         return 0;
1254 }
1255
1256 DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
1257
1258 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1259 {
1260         struct drm_crtc *crtc = (struct drm_crtc *) s->private;
1261         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1262
1263         seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1264         seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1265         seq_printf(s, "core_clk_rate: %llu\n",
1266                         dpu_crtc->cur_perf.core_clk_rate);
1267         seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
1268         seq_printf(s, "max_per_pipe_ib: %llu\n",
1269                                 dpu_crtc->cur_perf.max_per_pipe_ib);
1270
1271         return 0;
1272 }
1273 DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
1274
1275 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1276 {
1277         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1278
1279         dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
1280                         crtc->dev->primary->debugfs_root);
1281
1282         debugfs_create_file("status", 0400,
1283                         dpu_crtc->debugfs_root,
1284                         dpu_crtc, &_dpu_debugfs_status_fops);
1285         debugfs_create_file("state", 0600,
1286                         dpu_crtc->debugfs_root,
1287                         &dpu_crtc->base,
1288                         &dpu_crtc_debugfs_state_fops);
1289
1290         return 0;
1291 }
1292 #else
1293 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1294 {
1295         return 0;
1296 }
1297 #endif /* CONFIG_DEBUG_FS */
1298
1299 static int dpu_crtc_late_register(struct drm_crtc *crtc)
1300 {
1301         return _dpu_crtc_init_debugfs(crtc);
1302 }
1303
1304 static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
1305 {
1306         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1307
1308         debugfs_remove_recursive(dpu_crtc->debugfs_root);
1309 }
1310
1311 static const struct drm_crtc_funcs dpu_crtc_funcs = {
1312         .set_config = drm_atomic_helper_set_config,
1313         .destroy = dpu_crtc_destroy,
1314         .page_flip = drm_atomic_helper_page_flip,
1315         .reset = dpu_crtc_reset,
1316         .atomic_duplicate_state = dpu_crtc_duplicate_state,
1317         .atomic_destroy_state = dpu_crtc_destroy_state,
1318         .late_register = dpu_crtc_late_register,
1319         .early_unregister = dpu_crtc_early_unregister,
1320         .enable_vblank  = msm_crtc_enable_vblank,
1321         .disable_vblank = msm_crtc_disable_vblank,
1322         .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
1323         .get_vblank_counter = dpu_crtc_get_vblank_counter,
1324 };
1325
1326 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
1327         .atomic_disable = dpu_crtc_disable,
1328         .atomic_enable = dpu_crtc_enable,
1329         .atomic_check = dpu_crtc_atomic_check,
1330         .atomic_begin = dpu_crtc_atomic_begin,
1331         .atomic_flush = dpu_crtc_atomic_flush,
1332         .get_scanout_position = dpu_crtc_get_scanout_position,
1333 };
1334
1335 /* initialize crtc */
1336 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1337                                 struct drm_plane *cursor)
1338 {
1339         struct drm_crtc *crtc = NULL;
1340         struct dpu_crtc *dpu_crtc = NULL;
1341         int i;
1342
1343         dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
1344         if (!dpu_crtc)
1345                 return ERR_PTR(-ENOMEM);
1346
1347         crtc = &dpu_crtc->base;
1348         crtc->dev = dev;
1349
1350         spin_lock_init(&dpu_crtc->spin_lock);
1351         atomic_set(&dpu_crtc->frame_pending, 0);
1352
1353         init_completion(&dpu_crtc->frame_done_comp);
1354
1355         INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1356
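            /*
             * Pre-populate a fixed pool of frame events: the frame event
             * callback takes entries from frame_event_list and the work
             * handler returns them once processed.
             */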
1357         for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1358                 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1359                 list_add(&dpu_crtc->frame_events[i].list,
1360                                 &dpu_crtc->frame_event_list);
1361                 kthread_init_work(&dpu_crtc->frame_events[i].work,
1362                                 dpu_crtc_frame_event_work);
1363         }
1364
1365         drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
1366                                 NULL);
1367
1368         drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1369
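            /* no degamma/gamma LUTs, but expose a CTM (hence 0, true, 0) */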
1370         drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
1371
1372         /* save user friendly CRTC name for later */
1373         snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1374
1375         /* initialize event handling */
1376         spin_lock_init(&dpu_crtc->event_lock);
1377
1378         DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
1379         return crtc;
1380 }