drm/msm: dpu: Don't drop locks in crtc_vblank_enable
[linux-2.6-microblaze.git] / drivers / gpu / drm / msm / disp / dpu1 / dpu_crtc.c
1 /*
2  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published by
8  * the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #define pr_fmt(fmt)     "[drm:%s:%d] " fmt, __func__, __LINE__
20 #include <linux/sort.h>
21 #include <linux/debugfs.h>
22 #include <linux/ktime.h>
23 #include <drm/drm_mode.h>
24 #include <drm/drm_crtc.h>
25 #include <drm/drm_crtc_helper.h>
26 #include <drm/drm_flip_work.h>
27 #include <drm/drm_rect.h>
28
29 #include "dpu_kms.h"
30 #include "dpu_hw_lm.h"
31 #include "dpu_hw_ctl.h"
32 #include "dpu_crtc.h"
33 #include "dpu_plane.h"
34 #include "dpu_encoder.h"
35 #include "dpu_vbif.h"
36 #include "dpu_core_perf.h"
37 #include "dpu_trace.h"
38
39 #define DPU_DRM_BLEND_OP_NOT_DEFINED    0
40 #define DPU_DRM_BLEND_OP_OPAQUE         1
41 #define DPU_DRM_BLEND_OP_PREMULTIPLIED  2
42 #define DPU_DRM_BLEND_OP_COVERAGE       3
43 #define DPU_DRM_BLEND_OP_MAX            4
44
45 /* layer mixer index on dpu_crtc */
46 #define LEFT_MIXER 0
47 #define RIGHT_MIXER 1
48
/*
 * _dpu_crtc_get_mixer_width - width of the horizontal slice one mixer drives
 * @cstate: Pointer to dpu crtc state
 * @mode: Pointer to the display mode being driven
 *
 * The active width is split evenly across the assigned layer mixers, so
 * each mixer covers hdisplay / num_mixers pixels (integer division).
 */
static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
                                            struct drm_display_mode *mode)
{
        return mode->hdisplay / cstate->num_mixers;
}
54
55 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
56 {
57         struct msm_drm_private *priv = crtc->dev->dev_private;
58
59         return to_dpu_kms(priv->kms);
60 }
61
62 static void dpu_crtc_destroy(struct drm_crtc *crtc)
63 {
64         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
65
66         DPU_DEBUG("\n");
67
68         if (!crtc)
69                 return;
70
71         drm_crtc_cleanup(crtc);
72         mutex_destroy(&dpu_crtc->crtc_lock);
73         kfree(dpu_crtc);
74 }
75
76 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
77                 struct dpu_plane_state *pstate, struct dpu_format *format)
78 {
79         struct dpu_hw_mixer *lm = mixer->hw_lm;
80         uint32_t blend_op;
81         struct drm_format_name_buf format_name;
82
83         /* default to opaque blending */
84         blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
85                 DPU_BLEND_BG_ALPHA_BG_CONST;
86
87         if (format->alpha_enable) {
88                 /* coverage blending */
89                 blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
90                         DPU_BLEND_BG_ALPHA_FG_PIXEL |
91                         DPU_BLEND_BG_INV_ALPHA;
92         }
93
94         lm->ops.setup_blend_config(lm, pstate->stage,
95                                 0xFF, 0, blend_op);
96
97         DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
98                 drm_get_format_name(format->base.pixel_format, &format_name),
99                 format->alpha_enable, blend_op);
100 }
101
102 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
103 {
104         struct dpu_crtc *dpu_crtc;
105         struct dpu_crtc_state *crtc_state;
106         int lm_idx, lm_horiz_position;
107
108         dpu_crtc = to_dpu_crtc(crtc);
109         crtc_state = to_dpu_crtc_state(crtc->state);
110
111         lm_horiz_position = 0;
112         for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
113                 const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
114                 struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
115                 struct dpu_hw_mixer_cfg cfg;
116
117                 if (!lm_roi || !drm_rect_visible(lm_roi))
118                         continue;
119
120                 cfg.out_width = drm_rect_width(lm_roi);
121                 cfg.out_height = drm_rect_height(lm_roi);
122                 cfg.right_mixer = lm_horiz_position++;
123                 cfg.flags = 0;
124                 hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
125         }
126 }
127
/*
 * _dpu_crtc_blend_setup_mixer - stage the crtc's planes onto its mixers
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure (supplies stage_cfg)
 * @mixer: Array of cstate->num_mixers mixers reserved for this crtc
 *
 * Builds dpu_crtc->stage_cfg from every plane attached to the crtc,
 * programs per-mixer blend configuration, accumulates each mixer's
 * flush mask, then programs the mixer output ROIs.
 */
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
        struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
        struct drm_plane *plane;
        struct drm_framebuffer *fb;
        struct drm_plane_state *state;
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct dpu_plane_state *pstate = NULL;
        struct dpu_format *format;
        struct dpu_hw_ctl *ctl = mixer->lm_ctl;
        struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

        u32 flush_mask;
        uint32_t stage_idx, lm_idx;
        /* number of planes already placed on each z-order stage */
        int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
        bool bg_alpha_enable = false;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                state = plane->state;
                if (!state)
                        continue;

                pstate = to_dpu_plane_state(state);
                fb = state->fb;

                dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

                DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
                                crtc->base.id,
                                pstate->stage,
                                plane->base.id,
                                dpu_plane_pipe(plane) - SSPP_VIG0,
                                state->fb ? state->fb->base.id : -1);

                format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

                /* an alpha-capable base stage alters how later opaque
                 * stages set their mixer op mode (see blend loop below)
                 */
                if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
                        bg_alpha_enable = true;

                /* slot index within this plane's z-order stage */
                stage_idx = zpos_cnt[pstate->stage]++;
                stage_cfg->stage[pstate->stage][stage_idx] =
                                        dpu_plane_pipe(plane);
                stage_cfg->multirect_index[pstate->stage][stage_idx] =
                                        pstate->multirect_index;

                trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
                                           state, pstate, stage_idx,
                                           dpu_plane_pipe(plane) - SSPP_VIG0,
                                           format->base.pixel_format,
                                           fb ? fb->modifier : 0);

                /* blend config update */
                for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
                        _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
                                                pstate, format);

                        mixer[lm_idx].flush_mask |= flush_mask;

                        if (bg_alpha_enable && !format->alpha_enable)
                                mixer[lm_idx].mixer_op_mode = 0;
                        else
                                mixer[lm_idx].mixer_op_mode |=
                                                1 << pstate->stage;
                }
        }

        _dpu_crtc_program_lm_output_roi(crtc);
}
196
/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 *
 * Resets each mixer's software blend state, rebuilds the stage
 * configuration from the attached planes, then programs alpha-out,
 * pending flush masks and blend stages into the hardware.
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct dpu_crtc_mixer *mixer = cstate->mixers;
        struct dpu_hw_ctl *ctl;
        struct dpu_hw_mixer *lm;
        int i;

        DPU_DEBUG("%s\n", dpu_crtc->name);

        /* validate assignments and clear per-mixer software state */
        for (i = 0; i < cstate->num_mixers; i++) {
                if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
                        DPU_ERROR("invalid lm or ctl assigned to mixer\n");
                        return;
                }
                mixer[i].mixer_op_mode = 0;
                mixer[i].flush_mask = 0;
                /* clear_all_blendstages is optional in the ctl ops table */
                if (mixer[i].lm_ctl->ops.clear_all_blendstages)
                        mixer[i].lm_ctl->ops.clear_all_blendstages(
                                        mixer[i].lm_ctl);
        }

        /* initialize stage cfg */
        memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

        _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

        /* push the accumulated per-mixer state down to the hardware */
        for (i = 0; i < cstate->num_mixers; i++) {
                ctl = mixer[i].lm_ctl;
                lm = mixer[i].hw_lm;

                lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

                mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
                        mixer[i].hw_lm->idx);

                /* stage config flush mask */
                ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

                DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
                        mixer[i].hw_lm->idx - LM_0,
                        mixer[i].mixer_op_mode,
                        ctl->idx - CTL_0,
                        mixer[i].flush_mask);

                ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
                        &dpu_crtc->stage_cfg);
        }
}
251
252 /**
253  *  _dpu_crtc_complete_flip - signal pending page_flip events
254  * Any pending vblank events are added to the vblank_event_list
255  * so that the next vblank interrupt shall signal them.
256  * However PAGE_FLIP events are not handled through the vblank_event_list.
257  * This API signals any pending PAGE_FLIP events requested through
258  * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
259  * @crtc: Pointer to drm crtc structure
260  */
261 static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
262 {
263         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
264         struct drm_device *dev = crtc->dev;
265         unsigned long flags;
266
267         spin_lock_irqsave(&dev->event_lock, flags);
268         if (dpu_crtc->event) {
269                 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
270                               dpu_crtc->event);
271                 trace_dpu_crtc_complete_flip(DRMID(crtc));
272                 drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
273                 dpu_crtc->event = NULL;
274         }
275         spin_unlock_irqrestore(&dev->event_lock, flags);
276 }
277
278 enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
279 {
280         struct drm_encoder *encoder;
281
282         if (!crtc || !crtc->dev) {
283                 DPU_ERROR("invalid crtc\n");
284                 return INTF_MODE_NONE;
285         }
286
287         drm_for_each_encoder(encoder, crtc->dev)
288                 if (encoder->crtc == crtc)
289                         return dpu_encoder_get_intf_mode(encoder);
290
291         return INTF_MODE_NONE;
292 }
293
294 static void dpu_crtc_vblank_cb(void *data)
295 {
296         struct drm_crtc *crtc = (struct drm_crtc *)data;
297         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
298
299         /* keep statistics on vblank callback - with auto reset via debugfs */
300         if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
301                 dpu_crtc->vblank_cb_time = ktime_get();
302         else
303                 dpu_crtc->vblank_cb_count++;
304         _dpu_crtc_complete_flip(crtc);
305         drm_crtc_handle_vblank(crtc);
306         trace_dpu_crtc_vblank_cb(DRMID(crtc));
307 }
308
/*
 * dpu_crtc_frame_event_work - deferred (kthread) handler for frame events
 * @work: kthread work item embedded in a dpu_crtc_frame_event
 *
 * Accounts done/error/panel-dead events against frame_pending, releases
 * bandwidth when the last pending frame completes, signals any
 * frame_done_comp waiters, and returns the event slot to the free list.
 */
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
        struct dpu_crtc_frame_event *fevent = container_of(work,
                        struct dpu_crtc_frame_event, work);
        struct drm_crtc *crtc = fevent->crtc;
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        unsigned long flags;
        bool frame_done = false;

        DPU_ATRACE_BEGIN("crtc_frame_event");

        DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
                        ktime_to_ns(fevent->ts));

        if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
                                | DPU_ENCODER_FRAME_EVENT_ERROR
                                | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

                if (atomic_read(&dpu_crtc->frame_pending) < 1) {
                        /* this should not happen */
                        DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
                                        crtc->base.id,
                                        fevent->event,
                                        ktime_to_ns(fevent->ts),
                                        atomic_read(&dpu_crtc->frame_pending));
                } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
                        /* release bandwidth and other resources */
                        trace_dpu_crtc_frame_event_done(DRMID(crtc),
                                                        fevent->event);
                        dpu_core_perf_crtc_release_bw(crtc);
                } else {
                        /* more frames still in flight, just trace it */
                        trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
                                                                fevent->event);
                }

                if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
                        dpu_core_perf_crtc_update(crtc, 0, false);

                if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
                                        | DPU_ENCODER_FRAME_EVENT_ERROR))
                        frame_done = true;
        }

        if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
                DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
                                crtc->base.id, ktime_to_ns(fevent->ts));

        if (frame_done)
                complete_all(&dpu_crtc->frame_done_comp);

        /* recycle the slot into the pool used by dpu_crtc_frame_event_cb */
        spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
        list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
        spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
        DPU_ATRACE_END("crtc_frame_event");
}
364
/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API to encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. Encoder may call different
 * events from different context - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in
 * proper task context to avoid scheduling delay or properly manage the
 * irq context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
        struct drm_crtc *crtc = (struct drm_crtc *)data;
        struct dpu_crtc *dpu_crtc;
        struct msm_drm_private *priv;
        struct dpu_crtc_frame_event *fevent;
        unsigned long flags;
        u32 crtc_id;

        /* Nothing to do on idle event */
        if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
                return;

        dpu_crtc = to_dpu_crtc(crtc);
        priv = crtc->dev->dev_private;
        crtc_id = drm_crtc_index(crtc);

        trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

        /* grab a free event slot from the pre-allocated pool */
        spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
        fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
                        struct dpu_crtc_frame_event, list);
        if (fevent)
                list_del_init(&fevent->list);
        spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

        /* pool exhausted: events arrive faster than the worker drains them */
        if (!fevent) {
                DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
                return;
        }

        /* defer the real processing to this crtc's event thread */
        fevent->event = event;
        fevent->crtc = crtc;
        fevent->ts = ktime_get();
        kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}
410
/*
 * dpu_crtc_complete_commit - post-commit completion hook for this crtc
 * @crtc: Pointer to drm crtc structure
 * @old_state: Previous crtc state (currently unused)
 *
 * Validates the crtc and emits a trace point; no other completion work
 * is performed here.
 */
void dpu_crtc_complete_commit(struct drm_crtc *crtc,
                struct drm_crtc_state *old_state)
{
        if (!crtc || !crtc->state) {
                DPU_ERROR("invalid crtc\n");
                return;
        }
        trace_dpu_crtc_complete_commit(DRMID(crtc));
}
420
/*
 * _dpu_crtc_setup_mixer_for_encoder - collect LM/CTL blocks reserved for
 * one encoder into the crtc state's mixer array
 * @crtc: Pointer to drm crtc structure
 * @enc: Encoder whose resource-manager reservations are consumed
 *
 * Appends entries to cstate->mixers starting at the current
 * cstate->num_mixers, incrementing it for each mixer set up.
 */
static void _dpu_crtc_setup_mixer_for_encoder(
                struct drm_crtc *crtc,
                struct drm_encoder *enc)
{
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
        struct dpu_rm *rm = &dpu_kms->rm;
        struct dpu_crtc_mixer *mixer;
        struct dpu_hw_ctl *last_valid_ctl = NULL;
        int i;
        struct dpu_rm_hw_iter lm_iter, ctl_iter;

        dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
        dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);

        /* Set up all the mixers and ctls reserved by this encoder */
        for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
                mixer = &cstate->mixers[i];

                /* stop once the encoder has no further LMs reserved */
                if (!dpu_rm_get_hw(rm, &lm_iter))
                        break;
                mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;

                /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
                if (!dpu_rm_get_hw(rm, &ctl_iter)) {
                        DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
                                        mixer->hw_lm->idx - LM_0);
                        mixer->lm_ctl = last_valid_ctl;
                } else {
                        mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
                        last_valid_ctl = mixer->lm_ctl;
                }

                /* Shouldn't happen, mixers are always >= ctls */
                if (!mixer->lm_ctl) {
                        DPU_ERROR("no valid ctls found for lm %d\n",
                                        mixer->hw_lm->idx - LM_0);
                        return;
                }

                mixer->encoder = enc;

                cstate->num_mixers++;
                DPU_DEBUG("setup mixer %d: lm %d\n",
                                i, mixer->hw_lm->idx - LM_0);
                DPU_DEBUG("setup mixer %d: ctl %d\n",
                                i, mixer->lm_ctl->idx - CTL_0);
        }
}
470
471 static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
472 {
473         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
474         struct drm_encoder *enc;
475
476         mutex_lock(&dpu_crtc->crtc_lock);
477         /* Check for mixers on all encoders attached to this crtc */
478         list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
479                 if (enc->crtc != crtc)
480                         continue;
481
482                 _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
483         }
484
485         mutex_unlock(&dpu_crtc->crtc_lock);
486 }
487
488 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
489                 struct drm_crtc_state *state)
490 {
491         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
492         struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
493         struct drm_display_mode *adj_mode = &state->adjusted_mode;
494         u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
495         int i;
496
497         for (i = 0; i < cstate->num_mixers; i++) {
498                 struct drm_rect *r = &cstate->lm_bounds[i];
499                 r->x1 = crtc_split_width * i;
500                 r->y1 = 0;
501                 r->x2 = r->x1 + crtc_split_width;
502                 r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
503
504                 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
505         }
506
507         drm_mode_debug_printmodeline(adj_mode);
508 }
509
510 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
511                 struct drm_crtc_state *old_state)
512 {
513         struct dpu_crtc *dpu_crtc;
514         struct dpu_crtc_state *cstate;
515         struct drm_encoder *encoder;
516         struct drm_device *dev;
517         unsigned long flags;
518         struct dpu_crtc_smmu_state_data *smmu_state;
519
520         if (!crtc) {
521                 DPU_ERROR("invalid crtc\n");
522                 return;
523         }
524
525         if (!crtc->state->enable) {
526                 DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
527                                 crtc->base.id, crtc->state->enable);
528                 return;
529         }
530
531         DPU_DEBUG("crtc%d\n", crtc->base.id);
532
533         dpu_crtc = to_dpu_crtc(crtc);
534         cstate = to_dpu_crtc_state(crtc->state);
535         dev = crtc->dev;
536         smmu_state = &dpu_crtc->smmu_state;
537
538         if (!cstate->num_mixers) {
539                 _dpu_crtc_setup_mixers(crtc);
540                 _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
541         }
542
543         if (dpu_crtc->event) {
544                 WARN_ON(dpu_crtc->event);
545         } else {
546                 spin_lock_irqsave(&dev->event_lock, flags);
547                 dpu_crtc->event = crtc->state->event;
548                 crtc->state->event = NULL;
549                 spin_unlock_irqrestore(&dev->event_lock, flags);
550         }
551
552         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
553                 if (encoder->crtc != crtc)
554                         continue;
555
556                 /* encoder will trigger pending mask now */
557                 dpu_encoder_trigger_kickoff_pending(encoder);
558         }
559
560         /*
561          * If no mixers have been allocated in dpu_crtc_atomic_check(),
562          * it means we are trying to flush a CRTC whose state is disabled:
563          * nothing else needs to be done.
564          */
565         if (unlikely(!cstate->num_mixers))
566                 return;
567
568         _dpu_crtc_blend_setup(crtc);
569
570         /*
571          * PP_DONE irq is only used by command mode for now.
572          * It is better to request pending before FLUSH and START trigger
573          * to make sure no pp_done irq missed.
574          * This is safe because no pp_done will happen before SW trigger
575          * in command mode.
576          */
577 }
578
579 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
580                 struct drm_crtc_state *old_crtc_state)
581 {
582         struct dpu_crtc *dpu_crtc;
583         struct drm_device *dev;
584         struct drm_plane *plane;
585         struct msm_drm_private *priv;
586         struct msm_drm_thread *event_thread;
587         unsigned long flags;
588         struct dpu_crtc_state *cstate;
589
590         if (!crtc->state->enable) {
591                 DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
592                                 crtc->base.id, crtc->state->enable);
593                 return;
594         }
595
596         DPU_DEBUG("crtc%d\n", crtc->base.id);
597
598         dpu_crtc = to_dpu_crtc(crtc);
599         cstate = to_dpu_crtc_state(crtc->state);
600         dev = crtc->dev;
601         priv = dev->dev_private;
602
603         if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
604                 DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
605                 return;
606         }
607
608         event_thread = &priv->event_thread[crtc->index];
609
610         if (dpu_crtc->event) {
611                 DPU_DEBUG("already received dpu_crtc->event\n");
612         } else {
613                 spin_lock_irqsave(&dev->event_lock, flags);
614                 dpu_crtc->event = crtc->state->event;
615                 crtc->state->event = NULL;
616                 spin_unlock_irqrestore(&dev->event_lock, flags);
617         }
618
619         /*
620          * If no mixers has been allocated in dpu_crtc_atomic_check(),
621          * it means we are trying to flush a CRTC whose state is disabled:
622          * nothing else needs to be done.
623          */
624         if (unlikely(!cstate->num_mixers))
625                 return;
626
627         /*
628          * For planes without commit update, drm framework will not add
629          * those planes to current state since hardware update is not
630          * required. However, if those planes were power collapsed since
631          * last commit cycle, driver has to restore the hardware state
632          * of those planes explicitly here prior to plane flush.
633          */
634         drm_atomic_crtc_for_each_plane(plane, crtc)
635                 dpu_plane_restore(plane);
636
637         /* update performance setting before crtc kickoff */
638         dpu_core_perf_crtc_update(crtc, 1, false);
639
640         /*
641          * Final plane updates: Give each plane a chance to complete all
642          *                      required writes/flushing before crtc's "flush
643          *                      everything" call below.
644          */
645         drm_atomic_crtc_for_each_plane(plane, crtc) {
646                 if (dpu_crtc->smmu_state.transition_error)
647                         dpu_plane_set_error(plane, true);
648                 dpu_plane_flush(plane);
649         }
650
651         /* Kickoff will be scheduled by outer layer */
652 }
653
654 /**
655  * dpu_crtc_destroy_state - state destroy hook
656  * @crtc: drm CRTC
657  * @state: CRTC state object to release
658  */
659 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
660                 struct drm_crtc_state *state)
661 {
662         struct dpu_crtc *dpu_crtc;
663         struct dpu_crtc_state *cstate;
664
665         if (!crtc || !state) {
666                 DPU_ERROR("invalid argument(s)\n");
667                 return;
668         }
669
670         dpu_crtc = to_dpu_crtc(crtc);
671         cstate = to_dpu_crtc_state(state);
672
673         DPU_DEBUG("crtc%d\n", crtc->base.id);
674
675         __drm_atomic_helper_crtc_destroy_state(state);
676
677         kfree(cstate);
678 }
679
/*
 * _dpu_crtc_wait_for_frame_done - wait for the pending frame to complete
 * @crtc: Pointer to drm crtc structure
 *
 * Returns 0 when no frame is pending or the frame-done completion fired
 * within DPU_FRAME_DONE_TIMEOUT, -ETIMEDOUT otherwise.
 */
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        int ret, rc = 0;

        if (!atomic_read(&dpu_crtc->frame_pending)) {
                DPU_DEBUG("no frames pending\n");
                return 0;
        }

        DPU_ATRACE_BEGIN("frame done completion wait");
        ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
                        msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
        if (!ret) {
                /* NOTE(review): ret is necessarily 0 here (timeout), so
                 * the "ret" in the message carries no extra information
                 */
                DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
                rc = -ETIMEDOUT;
        }
        DPU_ATRACE_END("frame done completion wait");

        return rc;
}
701
/*
 * dpu_crtc_commit_kickoff - trigger the crtc's encoders to flush/start
 * @crtc: Pointer to drm crtc structure
 * @async: When true, skip waiting on the previous frame's completion
 */
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
        struct drm_encoder *encoder;
        struct drm_device *dev = crtc->dev;
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
        struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        int ret;

        /*
         * If no mixers has been allocated in dpu_crtc_atomic_check(),
         * it means we are trying to start a CRTC whose state is disabled:
         * nothing else needs to be done.
         */
        if (unlikely(!cstate->num_mixers))
                return;

        DPU_ATRACE_BEGIN("crtc_commit");

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct dpu_encoder_kickoff_params params = { 0 };

                if (encoder->crtc != crtc)
                        continue;

                /*
                 * Encoder will flush/start now, unless it has a tx pending.
                 * If so, it may delay and flush at an irq event (e.g. ppdone)
                 */
                dpu_encoder_prepare_for_kickoff(encoder, &params, async);
        }


        if (!async) {
                /* wait for frame_event_done completion */
                DPU_ATRACE_BEGIN("wait_for_frame_done_event");
                ret = _dpu_crtc_wait_for_frame_done(crtc);
                DPU_ATRACE_END("wait_for_frame_done_event");
                if (ret) {
                        DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
                                        crtc->base.id,
                                        atomic_read(&dpu_crtc->frame_pending));
                        goto end;
                }

                if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
                        /* acquire bandwidth and other resources */
                        DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
                } else
                        DPU_DEBUG("crtc%d commit\n", crtc->base.id);

                dpu_crtc->play_count++;
        }

        /* clear any stale vbif errors before starting the new frame */
        dpu_vbif_clear_errors(dpu_kms);

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc != crtc)
                        continue;

                dpu_encoder_kickoff(encoder, async);
        }

end:
        /* re-arm the completion for the next synchronous kickoff */
        if (!async)
                reinit_completion(&dpu_crtc->frame_done_comp);
        DPU_ATRACE_END("crtc_commit");
}
770
/**
 * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
 * @dpu_crtc: Pointer to dpu crtc structure
 * @enable: Whether to enable/disable vblanks
 *
 * The _no_lock suffix indicates this helper takes no locks itself.
 * On enable, the device is powered up *before* the vblank callback is
 * registered on each attached encoder; on disable, the callbacks are
 * removed first and the power reference is dropped last, keeping the
 * pm_runtime get/put calls balanced.
 */
static void _dpu_crtc_vblank_enable_no_lock(
                struct dpu_crtc *dpu_crtc, bool enable)
{
        struct drm_crtc *crtc = &dpu_crtc->base;
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *enc;

        if (enable) {
                /* power up first so vblank irqs can be serviced */
                pm_runtime_get_sync(dev->dev);

                list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
                        if (enc->crtc != crtc)
                                continue;

                        trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
                                                     DRMID(enc), enable,
                                                     dpu_crtc);

                        dpu_encoder_register_vblank_callback(enc,
                                        dpu_crtc_vblank_cb, (void *)crtc);
                }
        } else {
                list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
                        if (enc->crtc != crtc)
                                continue;

                        trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
                                                     DRMID(enc), enable,
                                                     dpu_crtc);

                        /* NULL callback deregisters */
                        dpu_encoder_register_vblank_callback(enc, NULL, NULL);
                }

                /* balances the get in the enable path */
                pm_runtime_put_sync(dev->dev);
        }
}
812
813 static void dpu_crtc_reset(struct drm_crtc *crtc)
814 {
815         struct dpu_crtc_state *cstate;
816
817         if (crtc->state)
818                 dpu_crtc_destroy_state(crtc, crtc->state);
819
820         crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
821         if (crtc->state)
822                 crtc->state->crtc = crtc;
823 }
824
825 /**
826  * dpu_crtc_duplicate_state - state duplicate hook
827  * @crtc: Pointer to drm crtc structure
828  * @Returns: Pointer to new drm_crtc_state structure
829  */
830 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
831 {
832         struct dpu_crtc *dpu_crtc;
833         struct dpu_crtc_state *cstate, *old_cstate;
834
835         if (!crtc || !crtc->state) {
836                 DPU_ERROR("invalid argument(s)\n");
837                 return NULL;
838         }
839
840         dpu_crtc = to_dpu_crtc(crtc);
841         old_cstate = to_dpu_crtc_state(crtc->state);
842         cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
843         if (!cstate) {
844                 DPU_ERROR("failed to allocate state\n");
845                 return NULL;
846         }
847
848         /* duplicate base helper */
849         __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
850
851         return &cstate->base;
852 }
853
854 static void dpu_crtc_disable(struct drm_crtc *crtc)
855 {
856         struct dpu_crtc *dpu_crtc;
857         struct dpu_crtc_state *cstate;
858         struct drm_display_mode *mode;
859         struct drm_encoder *encoder;
860         struct msm_drm_private *priv;
861         unsigned long flags;
862
863         if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
864                 DPU_ERROR("invalid crtc\n");
865                 return;
866         }
867         dpu_crtc = to_dpu_crtc(crtc);
868         cstate = to_dpu_crtc_state(crtc->state);
869         mode = &cstate->base.adjusted_mode;
870         priv = crtc->dev->dev_private;
871
872         DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
873
874         /* Disable/save vblank irq handling */
875         drm_crtc_vblank_off(crtc);
876
877         mutex_lock(&dpu_crtc->crtc_lock);
878
879         /* wait for frame_event_done completion */
880         if (_dpu_crtc_wait_for_frame_done(crtc))
881                 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
882                                 crtc->base.id,
883                                 atomic_read(&dpu_crtc->frame_pending));
884
885         trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
886         if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
887                 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
888         }
889         dpu_crtc->enabled = false;
890
891         if (atomic_read(&dpu_crtc->frame_pending)) {
892                 trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
893                                      atomic_read(&dpu_crtc->frame_pending));
894                 dpu_core_perf_crtc_release_bw(crtc);
895                 atomic_set(&dpu_crtc->frame_pending, 0);
896         }
897
898         dpu_core_perf_crtc_update(crtc, 0, true);
899
900         drm_for_each_encoder(encoder, crtc->dev) {
901                 if (encoder->crtc != crtc)
902                         continue;
903                 dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
904         }
905
906         memset(cstate->mixers, 0, sizeof(cstate->mixers));
907         cstate->num_mixers = 0;
908
909         /* disable clk & bw control until clk & bw properties are set */
910         cstate->bw_control = false;
911         cstate->bw_split_vote = false;
912
913         mutex_unlock(&dpu_crtc->crtc_lock);
914
915         if (crtc->state->event && !crtc->state->active) {
916                 spin_lock_irqsave(&crtc->dev->event_lock, flags);
917                 drm_crtc_send_vblank_event(crtc, crtc->state->event);
918                 crtc->state->event = NULL;
919                 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
920         }
921 }
922
923 static void dpu_crtc_enable(struct drm_crtc *crtc,
924                 struct drm_crtc_state *old_crtc_state)
925 {
926         struct dpu_crtc *dpu_crtc;
927         struct drm_encoder *encoder;
928         struct msm_drm_private *priv;
929
930         if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
931                 DPU_ERROR("invalid crtc\n");
932                 return;
933         }
934         priv = crtc->dev->dev_private;
935
936         DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
937         dpu_crtc = to_dpu_crtc(crtc);
938
939         drm_for_each_encoder(encoder, crtc->dev) {
940                 if (encoder->crtc != crtc)
941                         continue;
942                 dpu_encoder_register_frame_event_callback(encoder,
943                                 dpu_crtc_frame_event_cb, (void *)crtc);
944         }
945
946         mutex_lock(&dpu_crtc->crtc_lock);
947         trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
948         if (!dpu_crtc->enabled && dpu_crtc->vblank_requested) {
949                 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
950         }
951         dpu_crtc->enabled = true;
952
953         mutex_unlock(&dpu_crtc->crtc_lock);
954
955         /* Enable/restore vblank irq handling */
956         drm_crtc_vblank_on(crtc);
957 }
958
/* scratch record used by dpu_crtc_atomic_check to validate staged planes */
struct plane_state {
	struct dpu_plane_state *dpu_pstate;	/* driver-private plane state */
	const struct drm_plane_state *drm_pstate; /* drm core plane state */
	int stage;	/* blend stage, taken from normalized_zpos */
	u32 pipe_id;	/* hardware pipe backing the plane */
};
965
966 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
967                 struct drm_crtc_state *state)
968 {
969         struct dpu_crtc *dpu_crtc;
970         struct plane_state *pstates;
971         struct dpu_crtc_state *cstate;
972
973         const struct drm_plane_state *pstate;
974         struct drm_plane *plane;
975         struct drm_display_mode *mode;
976
977         int cnt = 0, rc = 0, mixer_width, i, z_pos;
978
979         struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
980         int multirect_count = 0;
981         const struct drm_plane_state *pipe_staged[SSPP_MAX];
982         int left_zpos_cnt = 0, right_zpos_cnt = 0;
983         struct drm_rect crtc_rect = { 0 };
984
985         if (!crtc) {
986                 DPU_ERROR("invalid crtc\n");
987                 return -EINVAL;
988         }
989
990         pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
991
992         dpu_crtc = to_dpu_crtc(crtc);
993         cstate = to_dpu_crtc_state(state);
994
995         if (!state->enable || !state->active) {
996                 DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
997                                 crtc->base.id, state->enable, state->active);
998                 goto end;
999         }
1000
1001         mode = &state->adjusted_mode;
1002         DPU_DEBUG("%s: check", dpu_crtc->name);
1003
1004         /* force a full mode set if active state changed */
1005         if (state->active_changed)
1006                 state->mode_changed = true;
1007
1008         memset(pipe_staged, 0, sizeof(pipe_staged));
1009
1010         mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
1011
1012         _dpu_crtc_setup_lm_bounds(crtc, state);
1013
1014         crtc_rect.x2 = mode->hdisplay;
1015         crtc_rect.y2 = mode->vdisplay;
1016
1017          /* get plane state for all drm planes associated with crtc state */
1018         drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
1019                 struct drm_rect dst, clip = crtc_rect;
1020
1021                 if (IS_ERR_OR_NULL(pstate)) {
1022                         rc = PTR_ERR(pstate);
1023                         DPU_ERROR("%s: failed to get plane%d state, %d\n",
1024                                         dpu_crtc->name, plane->base.id, rc);
1025                         goto end;
1026                 }
1027                 if (cnt >= DPU_STAGE_MAX * 4)
1028                         continue;
1029
1030                 pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
1031                 pstates[cnt].drm_pstate = pstate;
1032                 pstates[cnt].stage = pstate->normalized_zpos;
1033                 pstates[cnt].pipe_id = dpu_plane_pipe(plane);
1034
1035                 if (pipe_staged[pstates[cnt].pipe_id]) {
1036                         multirect_plane[multirect_count].r0 =
1037                                 pipe_staged[pstates[cnt].pipe_id];
1038                         multirect_plane[multirect_count].r1 = pstate;
1039                         multirect_count++;
1040
1041                         pipe_staged[pstates[cnt].pipe_id] = NULL;
1042                 } else {
1043                         pipe_staged[pstates[cnt].pipe_id] = pstate;
1044                 }
1045
1046                 cnt++;
1047
1048                 dst = drm_plane_state_dest(pstate);
1049                 if (!drm_rect_intersect(&clip, &dst)) {
1050                         DPU_ERROR("invalid vertical/horizontal destination\n");
1051                         DPU_ERROR("display: " DRM_RECT_FMT " plane: "
1052                                   DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
1053                                   DRM_RECT_ARG(&dst));
1054                         rc = -E2BIG;
1055                         goto end;
1056                 }
1057         }
1058
1059         for (i = 1; i < SSPP_MAX; i++) {
1060                 if (pipe_staged[i]) {
1061                         dpu_plane_clear_multirect(pipe_staged[i]);
1062
1063                         if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
1064                                 DPU_ERROR(
1065                                         "r1 only virt plane:%d not supported\n",
1066                                         pipe_staged[i]->plane->base.id);
1067                                 rc  = -EINVAL;
1068                                 goto end;
1069                         }
1070                 }
1071         }
1072
1073         z_pos = -1;
1074         for (i = 0; i < cnt; i++) {
1075                 /* reset counts at every new blend stage */
1076                 if (pstates[i].stage != z_pos) {
1077                         left_zpos_cnt = 0;
1078                         right_zpos_cnt = 0;
1079                         z_pos = pstates[i].stage;
1080                 }
1081
1082                 /* verify z_pos setting before using it */
1083                 if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
1084                         DPU_ERROR("> %d plane stages assigned\n",
1085                                         DPU_STAGE_MAX - DPU_STAGE_0);
1086                         rc = -EINVAL;
1087                         goto end;
1088                 } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
1089                         if (left_zpos_cnt == 2) {
1090                                 DPU_ERROR("> 2 planes @ stage %d on left\n",
1091                                         z_pos);
1092                                 rc = -EINVAL;
1093                                 goto end;
1094                         }
1095                         left_zpos_cnt++;
1096
1097                 } else {
1098                         if (right_zpos_cnt == 2) {
1099                                 DPU_ERROR("> 2 planes @ stage %d on right\n",
1100                                         z_pos);
1101                                 rc = -EINVAL;
1102                                 goto end;
1103                         }
1104                         right_zpos_cnt++;
1105                 }
1106
1107                 pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
1108                 DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
1109         }
1110
1111         for (i = 0; i < multirect_count; i++) {
1112                 if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
1113                         DPU_ERROR(
1114                         "multirect validation failed for planes (%d - %d)\n",
1115                                         multirect_plane[i].r0->plane->base.id,
1116                                         multirect_plane[i].r1->plane->base.id);
1117                         rc = -EINVAL;
1118                         goto end;
1119                 }
1120         }
1121
1122         rc = dpu_core_perf_crtc_check(crtc, state);
1123         if (rc) {
1124                 DPU_ERROR("crtc%d failed performance check %d\n",
1125                                 crtc->base.id, rc);
1126                 goto end;
1127         }
1128
1129         /* validate source split:
1130          * use pstates sorted by stage to check planes on same stage
1131          * we assume that all pipes are in source split so its valid to compare
1132          * without taking into account left/right mixer placement
1133          */
1134         for (i = 1; i < cnt; i++) {
1135                 struct plane_state *prv_pstate, *cur_pstate;
1136                 struct drm_rect left_rect, right_rect;
1137                 int32_t left_pid, right_pid;
1138                 int32_t stage;
1139
1140                 prv_pstate = &pstates[i - 1];
1141                 cur_pstate = &pstates[i];
1142                 if (prv_pstate->stage != cur_pstate->stage)
1143                         continue;
1144
1145                 stage = cur_pstate->stage;
1146
1147                 left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
1148                 left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
1149
1150                 right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
1151                 right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
1152
1153                 if (right_rect.x1 < left_rect.x1) {
1154                         swap(left_pid, right_pid);
1155                         swap(left_rect, right_rect);
1156                 }
1157
1158                 /**
1159                  * - planes are enumerated in pipe-priority order such that
1160                  *   planes with lower drm_id must be left-most in a shared
1161                  *   blend-stage when using source split.
1162                  * - planes in source split must be contiguous in width
1163                  * - planes in source split must have same dest yoff and height
1164                  */
1165                 if (right_pid < left_pid) {
1166                         DPU_ERROR(
1167                                 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
1168                                 stage, left_pid, right_pid);
1169                         rc = -EINVAL;
1170                         goto end;
1171                 } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
1172                         DPU_ERROR("non-contiguous coordinates for src split. "
1173                                   "stage: %d left: " DRM_RECT_FMT " right: "
1174                                   DRM_RECT_FMT "\n", stage,
1175                                   DRM_RECT_ARG(&left_rect),
1176                                   DRM_RECT_ARG(&right_rect));
1177                         rc = -EINVAL;
1178                         goto end;
1179                 } else if (left_rect.y1 != right_rect.y1 ||
1180                            drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
1181                         DPU_ERROR("source split at stage: %d. invalid "
1182                                   "yoff/height: left: " DRM_RECT_FMT " right: "
1183                                   DRM_RECT_FMT "\n", stage,
1184                                   DRM_RECT_ARG(&left_rect),
1185                                   DRM_RECT_ARG(&right_rect));
1186                         rc = -EINVAL;
1187                         goto end;
1188                 }
1189         }
1190
1191 end:
1192         kfree(pstates);
1193         return rc;
1194 }
1195
1196 int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
1197 {
1198         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1199
1200         mutex_lock(&dpu_crtc->crtc_lock);
1201         trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
1202         if (dpu_crtc->enabled) {
1203                 _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
1204         }
1205         dpu_crtc->vblank_requested = en;
1206         mutex_unlock(&dpu_crtc->crtc_lock);
1207
1208         return 0;
1209 }
1210
1211 #ifdef CONFIG_DEBUG_FS
/*
 * Dump the current crtc, mixer and plane configuration, plus a measured
 * vblank rate, to the "status" debugfs file.  Takes the global modeset
 * locks and crtc_lock so the snapshot is self-consistent.
 */
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	if (!s || !s->private)
		return -EINVAL;

	/* s->private was set to the dpu_crtc at debugfs file creation */
	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mutex_lock(&dpu_crtc->crtc_lock);
	mode = &crtc->state->adjusted_mode;
	/* per-mixer width: hdisplay divided across the active mixers */
	out_width = _dpu_crtc_get_mixer_width(cstate, mode);

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	/* one line per layer mixer, flagging missing lm/ctl blocks */
	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	/* per-plane dump: stage, framebuffer layout, src/dst rects */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	/* derive fps from vblank count accumulated since the last read */
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);

	mutex_unlock(&dpu_crtc->crtc_lock);
	drm_modeset_unlock_all(crtc->dev);

	return 0;
}
1330
/* debugfs open hook: bind _dpu_debugfs_status_show to this seq_file */
static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
1335
/*
 * Boilerplate generator: given __prefix, defines __prefix_open() and a
 * __prefix_fops file_operations that wires __prefix_show() into a
 * single_open() seq_file.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
static int __prefix ## _open(struct inode *inode, struct file *file)    \
{                                                                       \
	return single_open(file, __prefix ## _show, inode->i_private);  \
}                                                                       \
static const struct file_operations __prefix ## _fops = {               \
	.owner = THIS_MODULE,                                           \
	.open = __prefix ## _open,                                      \
	.release = single_release,                                      \
	.read = seq_read,                                               \
	.llseek = seq_lseek,                                            \
}
1348
1349 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
1350 {
1351         struct drm_crtc *crtc = (struct drm_crtc *) s->private;
1352         struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
1353         int i;
1354
1355         seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
1356         seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
1357         seq_printf(s, "core_clk_rate: %llu\n",
1358                         dpu_crtc->cur_perf.core_clk_rate);
1359         for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
1360                         i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
1361                 seq_printf(s, "bw_ctl[%d]: %llu\n", i,
1362                                 dpu_crtc->cur_perf.bw_ctl[i]);
1363                 seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
1364                                 dpu_crtc->cur_perf.max_per_pipe_ib[i]);
1365         }
1366
1367         return 0;
1368 }
1369 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
1370
1371 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
1372 {
1373         struct dpu_crtc *dpu_crtc;
1374         struct dpu_kms *dpu_kms;
1375
1376         static const struct file_operations debugfs_status_fops = {
1377                 .open =         _dpu_debugfs_status_open,
1378                 .read =         seq_read,
1379                 .llseek =       seq_lseek,
1380                 .release =      single_release,
1381         };
1382
1383         if (!crtc)
1384                 return -EINVAL;
1385         dpu_crtc = to_dpu_crtc(crtc);
1386
1387         dpu_kms = _dpu_crtc_get_kms(crtc);
1388
1389         dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
1390                         crtc->dev->primary->debugfs_root);
1391         if (!dpu_crtc->debugfs_root)
1392                 return -ENOMEM;
1393
1394         /* don't error check these */
1395         debugfs_create_file("status", 0400,
1396                         dpu_crtc->debugfs_root,
1397                         dpu_crtc, &debugfs_status_fops);
1398         debugfs_create_file("state", 0600,
1399                         dpu_crtc->debugfs_root,
1400                         &dpu_crtc->base,
1401                         &dpu_crtc_debugfs_state_fops);
1402
1403         return 0;
1404 }
1405
1406 static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
1407 {
1408         struct dpu_crtc *dpu_crtc;
1409
1410         if (!crtc)
1411                 return;
1412         dpu_crtc = to_dpu_crtc(crtc);
1413         debugfs_remove_recursive(dpu_crtc->debugfs_root);
1414 }
1415 #else
/* debugfs disabled: report success without creating any entries */
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
1420
/* debugfs disabled: nothing to tear down */
static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
{
}
1424 #endif /* CONFIG_DEBUG_FS */
1425
/* drm_crtc_funcs.late_register hook: create the per-crtc debugfs entries */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}
1430
/* drm_crtc_funcs.early_unregister hook: remove the debugfs entries */
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	_dpu_crtc_destroy_debugfs(crtc);
}
1435
/* drm core entry points for this crtc; atomic helpers cover legacy ioctls */
static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};
1446
/* atomic modeset helper callbacks for the crtc commit/check pipeline */
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};
1454
1455 /* initialize crtc */
1456 struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
1457                                 struct drm_plane *cursor)
1458 {
1459         struct drm_crtc *crtc = NULL;
1460         struct dpu_crtc *dpu_crtc = NULL;
1461         struct msm_drm_private *priv = NULL;
1462         struct dpu_kms *kms = NULL;
1463         int i;
1464
1465         priv = dev->dev_private;
1466         kms = to_dpu_kms(priv->kms);
1467
1468         dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
1469         if (!dpu_crtc)
1470                 return ERR_PTR(-ENOMEM);
1471
1472         crtc = &dpu_crtc->base;
1473         crtc->dev = dev;
1474
1475         mutex_init(&dpu_crtc->crtc_lock);
1476         spin_lock_init(&dpu_crtc->spin_lock);
1477         atomic_set(&dpu_crtc->frame_pending, 0);
1478
1479         init_completion(&dpu_crtc->frame_done_comp);
1480
1481         INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
1482
1483         for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
1484                 INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
1485                 list_add(&dpu_crtc->frame_events[i].list,
1486                                 &dpu_crtc->frame_event_list);
1487                 kthread_init_work(&dpu_crtc->frame_events[i].work,
1488                                 dpu_crtc_frame_event_work);
1489         }
1490
1491         drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
1492                                 NULL);
1493
1494         drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
1495         plane->crtc = crtc;
1496
1497         /* save user friendly CRTC name for later */
1498         snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
1499
1500         /* initialize event handling */
1501         spin_lock_init(&dpu_crtc->event_lock);
1502
1503         DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
1504         return crtc;
1505 }