1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013 Red Hat
4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
5 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
7 * Author: Rob Clark <robdclark@gmail.com>
10 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
11 #include <linux/debugfs.h>
12 #include <linux/kthread.h>
13 #include <linux/seq_file.h>
15 #include <drm/drm_crtc.h>
16 #include <drm/drm_file.h>
17 #include <drm/drm_probe_helper.h>
22 #include "dpu_hw_catalog.h"
23 #include "dpu_hw_intf.h"
24 #include "dpu_hw_ctl.h"
25 #include "dpu_hw_dspp.h"
26 #include "dpu_hw_dsc.h"
27 #include "dpu_hw_merge3d.h"
28 #include "dpu_formats.h"
29 #include "dpu_encoder_phys.h"
31 #include "dpu_trace.h"
32 #include "dpu_core_irq.h"
33 #include "disp/msm_disp_snapshot.h"
/* Print helpers that prefix the message with this encoder's DRM object id;
 * (e) may be NULL and is then printed as -1.
 */
35 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
36 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
38 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
39 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
42 * Two to anticipate panels that can do cmd/vid dynamic switching
43 * plan is to create all possible physical encoder types, and switch between
46 #define NUM_PHYS_ENCODER_TYPES 2
48 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
49 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
/* max pingpong/DSC channels driven by one virtual encoder */
51 #define MAX_CHANNELS_PER_ENC 2
53 #define IDLE_SHORT_TIMEOUT 1
/* hdisplay above which two layer mixers are used (see dpu_encoder_get_topology) */
55 #define MAX_HDISPLAY_SPLIT 1080
57 /* timeout in frames waiting for frame done */
58 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
61 * enum dpu_enc_rc_events - events for resource control state machine
62 * @DPU_ENC_RC_EVENT_KICKOFF:
63 * This event happens at NORMAL priority.
64 * Event that signals the start of the transfer. When this event is
65 * received, enable MDP/DSI core clocks. Regardless of the previous
66 * state, the resource should be in ON state at the end of this event.
67 * @DPU_ENC_RC_EVENT_FRAME_DONE:
68 * This event happens at INTERRUPT level.
69 * Event signals the end of the data transfer after the PP FRAME_DONE
70 * event. At the end of this event, a delayed work is scheduled to go to
71 * IDLE_PC state after IDLE_TIMEOUT time.
72 * @DPU_ENC_RC_EVENT_PRE_STOP:
73 * This event happens at NORMAL priority.
74 * This event, when received during the ON state, leave the RC STATE
75 * in the PRE_OFF state. It should be followed by the STOP event as
76 * part of encoder disable.
77 * If received during IDLE or OFF states, it will do nothing.
78 * @DPU_ENC_RC_EVENT_STOP:
79 * This event happens at NORMAL priority.
80 * When this event is received, disable all the MDP/DSI core clocks, and
81 * disable IRQs. It should be called from the PRE_OFF or IDLE states.
82 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
83 * PRE_OFF is expected when PRE_STOP was executed during the ON state.
84 * Resource state should be in OFF at the end of the event.
85 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
86 * This event happens at NORMAL priority from a work item.
87 * Event signals that there were no frame updates for IDLE_TIMEOUT time.
88 * This would disable MDP/DSI core clocks and change the resource state
91 enum dpu_enc_rc_events {
92 DPU_ENC_RC_EVENT_KICKOFF = 1, /* transfer start: resources must end up ON */
93 DPU_ENC_RC_EVENT_FRAME_DONE, /* IRQ level: frame done, arm delayed idle work */
94 DPU_ENC_RC_EVENT_PRE_STOP, /* ON -> PRE_OFF, precedes STOP during disable */
95 DPU_ENC_RC_EVENT_STOP, /* final off: clocks and IRQs disabled */
96 DPU_ENC_RC_EVENT_ENTER_IDLE /* idle worker fired after IDLE_TIMEOUT */
100 * enum dpu_enc_rc_states - states that the resource control maintains
101 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
102 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
103 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
104 * @DPU_ENC_RC_STATE_MODESET: Resource is in modeset state
105 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
107 enum dpu_enc_rc_states {
108 DPU_ENC_RC_STATE_OFF, /* fully off */
109 DPU_ENC_RC_STATE_PRE_OFF, /* transitioning to OFF (after PRE_STOP) */
111 DPU_ENC_RC_STATE_IDLE /* idle power-collapsed */
115 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
116 * encoders. Virtual encoder manages one "logical" display. Physical
117 * encoders manage one intf block, tied to a specific panel/sub-panel.
118 * Virtual encoder defers as much as possible to the physical encoders.
119 * Virtual encoder registers itself with the DRM Framework as the encoder.
120 * @base: drm_encoder base class for registration with DRM
121 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
122 * @enabled: True if the encoder is active, protected by enc_lock
123 * @num_phys_encs: Actual number of physical encoders contained.
124 * @phys_encs: Container of physical encoders managed.
125 * @cur_master: Pointer to the current master in this mode. Optimization
126 * Only valid after enable. Cleared as disable.
127 * @cur_slave: As above but for the slave encoder.
128 * @hw_pp: Handle to the pingpong blocks used for the display. No.
129 * pingpong blocks can be different than num_phys_encs.
130 * @hw_dsc: Handle to the DSC blocks used for the display.
131 * @dsc_mask: Bitmask of used DSC blocks.
132 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
133 * for partial update right-only cases, such as pingpong
134 * split where virtual pingpong does not generate IRQs
135 * @crtc: Pointer to the currently assigned crtc. Normally you
136 * would use crtc->state->encoder_mask to determine the
137 * link between encoder/crtc. However in this case we need
138 * to track crtc in the disable() hook which is called
139 * _after_ encoder_mask is cleared.
140 * @connector: If a mode is set, cached pointer to the active connector
141 * @crtc_kickoff_cb: Callback into CRTC that will flush & start
143 * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
144 * @debugfs_root: Debug file system root file node
145 * @enc_lock: Lock around physical encoder
146 * create/destroy/enable/disable
147 * @frame_busy_mask: Bitmask tracking which phys_enc we are still
148 * busy processing current command.
149 * Bit0 = phys_encs[0] etc.
150 * @crtc_frame_event_cb: callback handler for frame event
151 * @crtc_frame_event_cb_data: callback handler private data
152 * @frame_done_timeout_ms: frame done timeout in ms
153 * @frame_done_timer: watchdog timer for frame done event
154 * @vsync_event_timer: vsync timer
155 * @disp_info: local copy of msm_display_info struct
156 * @idle_pc_supported: indicate if idle power collaps is supported
157 * @rc_lock: resource control mutex lock to protect
158 * virt encoder over various state changes
159 * @rc_state: resource controller state
160 * @delayed_off_work: delayed worker to schedule disabling of
161 * clks and resources after IDLE_TIMEOUT time.
162 * @vsync_event_work: worker to handle vsync event for autorefresh
163 * @topology: topology of the display
164 * @idle_timeout: idle timeout duration in milliseconds
165 * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
167 struct dpu_encoder_virt {
168 struct drm_encoder base;
169 spinlock_t enc_spinlock; /* IRQ-time lock, virtual-encoder wide */
173 unsigned int num_phys_encs;
174 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
175 struct dpu_encoder_phys *cur_master;
176 struct dpu_encoder_phys *cur_slave;
177 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
178 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
180 unsigned int dsc_mask;
184 struct drm_crtc *crtc;
185 struct drm_connector *connector;
187 struct dentry *debugfs_root;
188 struct mutex enc_lock; /* serializes create/destroy/enable/disable */
189 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
190 void (*crtc_frame_event_cb)(void *, u32 event);
191 void *crtc_frame_event_cb_data;
193 atomic_t frame_done_timeout_ms;
194 struct timer_list frame_done_timer;
195 struct timer_list vsync_event_timer;
197 struct msm_display_info disp_info;
199 bool idle_pc_supported;
200 struct mutex rc_lock; /* protects rc_state transitions */
201 enum dpu_enc_rc_states rc_state;
202 struct delayed_work delayed_off_work;
203 struct kthread_work vsync_event_work;
204 struct msm_display_topology topology;
210 /* DSC configuration */
211 struct msm_display_dsc_config *dsc;
/* Upcast from the embedded drm_encoder to its containing virtual encoder. */
214 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
/* Ordered-dither coefficient table copied into the pingpong dither config
 * by _dpu_encoder_setup_dither().
 */
216 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
217 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
/* Report whether wide-bus mode is enabled for this (virtual) encoder. */
221 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
223 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
225 return dpu_enc->wide_bus_en;
/* Program spatial dithering on a pingpong block, based on the panel bpc.
 * No-op when the hw block has no setup_dither op.
 */
228 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
230 struct dpu_hw_dither_cfg dither_cfg = { 0 };
232 if (!hw_pp->ops.setup_dither)
/* 6-bit output depth on all four components, spatial (non-temporal) only */
237 dither_cfg.c0_bitdepth = 6;
238 dither_cfg.c1_bitdepth = 6;
239 dither_cfg.c2_bitdepth = 6;
240 dither_cfg.c3_bitdepth = 6;
241 dither_cfg.temporal_en = 0;
/* NOTE(review): a NULL cfg presumably disables dithering in the hw layer —
 * confirm against dpu_hw_pingpong's setup_dither implementation.
 */
244 hw_pp->ops.setup_dither(hw_pp, NULL);
248 memcpy(&dither_cfg.matrix, dither_matrix,
249 sizeof(u32) * DITHER_MATRIX_SZ);
251 hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
/* Map an intf_mode enum value to a printable name for log messages. */
254 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
257 case INTF_MODE_VIDEO:
258 return "INTF_MODE_VIDEO";
260 return "INTF_MODE_CMD";
261 case INTF_MODE_WB_BLOCK:
262 return "INTF_MODE_WB_BLOCK";
263 case INTF_MODE_WB_LINE:
264 return "INTF_MODE_WB_LINE";
266 return "INTF_MODE_UNKNOWN";
/* Log an IRQ-wait timeout for a physical encoder and propagate a frame-error
 * event to the parent virtual encoder via handle_frame_done(), if registered.
 */
270 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
271 enum dpu_intr_idx intr_idx)
273 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
274 DRMID(phys_enc->parent),
275 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
276 phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
277 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
279 if (phys_enc->parent_ops->handle_frame_done)
280 phys_enc->parent_ops->handle_frame_done(
281 phys_enc->parent, phys_enc,
282 DPU_ENCODER_FRAME_EVENT_ERROR);
285 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
286 u32 irq_idx, struct dpu_encoder_wait_info *info);
/* Wait for the IRQ serviced by @func to fire (tracked via
 * wait_info->atomic_cnt). On timeout, re-read the raw IRQ status since the
 * interrupt may have fired without the handler running yet; only report a
 * real timeout when the hardware shows no pending IRQ either.
 */
288 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
290 void (*func)(void *arg, int irq_idx),
291 struct dpu_encoder_wait_info *wait_info)
297 DPU_ERROR("invalid params\n");
300 /* note: do master / slave checking outside */
302 /* return EWOULDBLOCK since we know the wait isn't necessary */
303 if (phys_enc->enable_state == DPU_ENC_DISABLED) {
304 DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
305 DRMID(phys_enc->parent), func,
311 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
312 DRMID(phys_enc->parent), func);
316 DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
317 DRMID(phys_enc->parent), func,
318 irq, phys_enc->hw_pp->idx - PINGPONG_0,
319 atomic_read(wait_info->atomic_cnt));
321 ret = dpu_encoder_helper_wait_event_timeout(
322 DRMID(phys_enc->parent),
/* wait timed out: check whether the IRQ actually triggered in hw */
327 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
331 DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
332 DRMID(phys_enc->parent), func,
334 phys_enc->hw_pp->idx - PINGPONG_0,
335 atomic_read(wait_info->atomic_cnt));
/* run the handler by hand with local interrupts masked */
336 local_irq_save(flags);
338 local_irq_restore(flags);
342 DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
343 DRMID(phys_enc->parent), func,
345 phys_enc->hw_pp->idx - PINGPONG_0,
346 atomic_read(wait_info->atomic_cnt));
350 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
352 phys_enc->hw_pp->idx - PINGPONG_0,
353 atomic_read(wait_info->atomic_cnt));
/* Return the current master phys encoder's vsync counter, or 0 when there
 * is no virtual encoder or no current master.
 */
359 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
361 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
362 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
363 return phys ? atomic_read(&phys->vsync_cnt) : 0;
/* Return the current scanout line from the master phys encoder's
 * get_line_count op, when available.
 */
366 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
368 struct dpu_encoder_virt *dpu_enc;
369 struct dpu_encoder_phys *phys;
372 dpu_enc = to_dpu_encoder_virt(drm_enc);
373 phys = dpu_enc ? dpu_enc->cur_master : NULL;
375 if (phys && phys->ops.get_line_count)
376 linecount = phys->ops.get_line_count(phys);
/* drm_encoder_funcs::destroy - tear down every contained physical encoder
 * under enc_lock, then clean up the drm_encoder base and the lock itself.
 */
381 static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
383 struct dpu_encoder_virt *dpu_enc = NULL;
387 DPU_ERROR("invalid encoder\n");
391 dpu_enc = to_dpu_encoder_virt(drm_enc);
392 DPU_DEBUG_ENC(dpu_enc, "\n");
394 mutex_lock(&dpu_enc->enc_lock);
396 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
397 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
399 if (phys->ops.destroy) {
400 phys->ops.destroy(phys);
401 --dpu_enc->num_phys_encs;
402 dpu_enc->phys_encs[i] = NULL;
/* all phys encoders should have been destroyed above */
406 if (dpu_enc->num_phys_encs)
407 DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
408 dpu_enc->num_phys_encs);
409 dpu_enc->num_phys_encs = 0;
410 mutex_unlock(&dpu_enc->enc_lock);
412 drm_encoder_cleanup(drm_enc);
413 mutex_destroy(&dpu_enc->enc_lock);
/* Configure MDP-top split-pipe for a physical encoder. Only relevant for
 * DSI displays; a SOLO encoder disables split, while the MASTER programs
 * the split configuration (with single-flush when the phys requires it).
 */
416 void dpu_encoder_helper_split_config(
417 struct dpu_encoder_phys *phys_enc,
418 enum dpu_intf interface)
420 struct dpu_encoder_virt *dpu_enc;
421 struct split_pipe_cfg cfg = { 0 };
422 struct dpu_hw_mdp *hw_mdptop;
423 struct msm_display_info *disp_info;
425 if (!phys_enc->hw_mdptop || !phys_enc->parent) {
426 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
430 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
431 hw_mdptop = phys_enc->hw_mdptop;
432 disp_info = &dpu_enc->disp_info;
/* split pipe only applies to DSI displays */
434 if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
438 * disable split modes since encoder will be operating in as the only
439 * encoder, either for the entire use case in the case of, for example,
440 * single DSI, or for this frame in the case of left/right only partial
443 if (phys_enc->split_role == ENC_ROLE_SOLO) {
444 if (hw_mdptop->ops.setup_split_pipe)
445 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg)
450 cfg.mode = phys_enc->intf_mode;
451 cfg.intf = interface;
453 if (cfg.en && phys_enc->ops.needs_single_flush &&
454 phys_enc->ops.needs_single_flush(phys_enc))
455 cfg.split_flush_en = true;
/* only the master programs the split-pipe hardware */
457 if (phys_enc->split_role == ENC_ROLE_MASTER) {
458 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
460 if (hw_mdptop->ops.setup_split_pipe)
461 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
/* Return true when DSC merge is needed, i.e. more DSC blocks than interfaces
 * are in use (the 2:2:1 topology from dpu_encoder_get_topology).
 */
465 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
467 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
468 int i, intf_count = 0, num_dsc = 0;
470 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
471 if (dpu_enc->phys_encs[i])
474 /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
478 return (num_dsc > 0) && (num_dsc > intf_count);
/* Compute the display datapath topology (number of LMs, DSPPs, INTFs,
 * encoders and DSC blocks) for the given mode on this virtual encoder.
 */
481 static struct msm_display_topology dpu_encoder_get_topology(
482 struct dpu_encoder_virt *dpu_enc,
483 struct dpu_kms *dpu_kms,
484 struct drm_display_mode *mode)
486 struct msm_display_topology topology = {0};
487 int i, intf_count = 0;
489 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
490 if (dpu_enc->phys_encs[i])
493 /* Datapath topology selection
496 * 2 LM, 2 INTF ( Split display using 2 interfaces)
500 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
502 * Adding color blocks only to primary interface if available in
507 else if (!dpu_kms->catalog->caps->has_3d_merge)
/* wide modes need two layer mixers (split) */
510 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
/* DSPP (color) blocks only for DSI, and only when enough are available */
512 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
513 if (dpu_kms->catalog->dspp &&
514 (dpu_kms->catalog->dspp_count >= topology.num_lm))
515 topology.num_dspp = topology.num_lm;
518 topology.num_enc = 0;
519 topology.num_intf = intf_count;
522 /* In case of Display Stream Compression (DSC), we would use
523 * 2 encoders, 2 layer mixers and 1 interface
524 * this is power optimal and can drive up to (including) 4k
527 topology.num_enc = 2;
528 topology.num_dsc = 2;
529 topology.num_intf = 1;
/* drm_encoder_helper_funcs::atomic_check - validate the proposed state on
 * each physical encoder, then (re)reserve RM resources for the computed
 * topology when the CRTC needs a modeset.
 */
536 static int dpu_encoder_virt_atomic_check(
537 struct drm_encoder *drm_enc,
538 struct drm_crtc_state *crtc_state,
539 struct drm_connector_state *conn_state)
541 struct dpu_encoder_virt *dpu_enc;
542 struct msm_drm_private *priv;
543 struct dpu_kms *dpu_kms;
544 struct drm_display_mode *adj_mode;
545 struct msm_display_topology topology;
546 struct dpu_global_state *global_state;
550 if (!drm_enc || !crtc_state || !conn_state) {
551 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
552 drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
556 dpu_enc = to_dpu_encoder_virt(drm_enc);
557 DPU_DEBUG_ENC(dpu_enc, "\n");
559 priv = drm_enc->dev->dev_private;
560 dpu_kms = to_dpu_kms(priv->kms);
561 adj_mode = &crtc_state->adjusted_mode;
562 global_state = dpu_kms_get_global_state(crtc_state->state);
563 if (IS_ERR(global_state))
564 return PTR_ERR(global_state);
566 trace_dpu_enc_atomic_check(DRMID(drm_enc));
568 /* perform atomic check on the first physical encoder (master) */
569 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
570 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
572 if (phys->ops.atomic_check)
573 ret = phys->ops.atomic_check(phys, crtc_state,
576 DPU_ERROR_ENC(dpu_enc,
577 "mode unsupported, phys idx %d\n", i);
582 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
584 /* Reserve dynamic resources now. */
587 * Release and Allocate resources on every modeset
588 * Dont allocate when active is false.
590 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
591 dpu_rm_release(global_state, drm_enc);
/* only reserve when the CRTC is (or stays) active */
593 if (!crtc_state->active_changed || crtc_state->active)
594 ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
595 drm_enc, crtc_state, topology);
599 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
/* Program the MDP-top vsync/TE source for command-mode displays: collect
 * the pingpong indices in use and select either the watchdog timer or the
 * panel TE GPIO as vsync source.
 */
604 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
605 struct msm_display_info *disp_info)
607 struct dpu_vsync_source_cfg vsync_cfg = { 0 };
608 struct msm_drm_private *priv;
609 struct dpu_kms *dpu_kms;
610 struct dpu_hw_mdp *hw_mdptop;
611 struct drm_encoder *drm_enc;
614 if (!dpu_enc || !disp_info) {
615 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
616 dpu_enc != NULL, disp_info != NULL);
618 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
619 DPU_ERROR("invalid num phys enc %d/%d\n",
620 dpu_enc->num_phys_encs,
621 (int) ARRAY_SIZE(dpu_enc->hw_pp));
625 drm_enc = &dpu_enc->base;
626 /* this pointers are checked in virt_enable_helper */
627 priv = drm_enc->dev->dev_private;
629 dpu_kms = to_dpu_kms(priv->kms);
630 hw_mdptop = dpu_kms->hw_mdp;
632 DPU_ERROR("invalid mdptop\n");
/* vsync source only matters for command-mode displays */
636 if (hw_mdptop->ops.setup_vsync_source &&
637 disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
638 for (i = 0; i < dpu_enc->num_phys_encs; i++)
639 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
641 vsync_cfg.pp_count = dpu_enc->num_phys_encs;
642 if (disp_info->is_te_using_watchdog_timer)
643 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
645 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
647 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
/* Enable or disable IRQs on every contained physical encoder. */
651 static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
653 struct dpu_encoder_virt *dpu_enc;
657 DPU_ERROR("invalid encoder\n");
661 dpu_enc = to_dpu_encoder_virt(drm_enc);
663 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
664 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
665 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
667 if (phys->ops.irq_control)
668 phys->ops.irq_control(phys, enable);
/* Turn the encoder's resources on or off: DPU core clocks (via runtime PM
 * on the DPU platform device) together with all phys-encoder IRQs.
 */
673 static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
676 struct msm_drm_private *priv;
677 struct dpu_kms *dpu_kms;
678 struct dpu_encoder_virt *dpu_enc;
680 dpu_enc = to_dpu_encoder_virt(drm_enc);
681 priv = drm_enc->dev->dev_private;
682 dpu_kms = to_dpu_kms(priv->kms);
684 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
686 if (!dpu_enc->cur_master) {
687 DPU_ERROR("encoder master not set\n");
692 /* enable DPU core clks */
693 pm_runtime_get_sync(&dpu_kms->pdev->dev);
695 /* enable all the irq */
696 _dpu_encoder_irq_control(drm_enc, true);
699 /* disable all the irq */
700 _dpu_encoder_irq_control(drm_enc, false);
702 /* disable DPU core clks */
703 pm_runtime_put_sync(&dpu_kms->pdev->dev);
/* Resource-control state machine (see enum dpu_enc_rc_events /
 * dpu_enc_rc_states above). Serializes state transitions under rc_lock,
 * except FRAME_DONE which runs at interrupt context and therefore relies
 * on the callers' wait-for-idle ordering instead of the mutex.
 */
708 static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
711 struct dpu_encoder_virt *dpu_enc;
712 struct msm_drm_private *priv;
713 bool is_vid_mode = false;
715 if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
716 DPU_ERROR("invalid parameters\n");
719 dpu_enc = to_dpu_encoder_virt(drm_enc);
720 priv = drm_enc->dev->dev_private;
721 is_vid_mode = dpu_enc->disp_info.capabilities &
722 MSM_DISPLAY_CAP_VID_MODE;
725 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
726 * events and return early for other events (ie wb display).
728 if (!dpu_enc->idle_pc_supported &&
729 (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
730 sw_event != DPU_ENC_RC_EVENT_STOP &&
731 sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
734 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
735 dpu_enc->rc_state, "begin");
738 case DPU_ENC_RC_EVENT_KICKOFF:
739 /* cancel delayed off work, if any */
740 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
741 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
744 mutex_lock(&dpu_enc->rc_lock);
746 /* return if the resource control is already in ON state */
747 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
748 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
749 DRMID(drm_enc), sw_event);
750 mutex_unlock(&dpu_enc->rc_lock);
752 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
753 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
754 DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
755 DRMID(drm_enc), sw_event,
757 mutex_unlock(&dpu_enc->rc_lock);
/* video mode leaving IDLE only needs IRQs back; clocks stayed on */
761 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
762 _dpu_encoder_irq_control(drm_enc, true);
764 _dpu_encoder_resource_control_helper(drm_enc, true);
766 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
768 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
769 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
772 mutex_unlock(&dpu_enc->rc_lock);
775 case DPU_ENC_RC_EVENT_FRAME_DONE:
777 * mutex lock is not used as this event happens at interrupt
778 * context. And locking is not required as, the other events
779 * like KICKOFF and STOP does a wait-for-idle before executing
780 * the resource_control
782 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
783 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
784 DRMID(drm_enc), sw_event,
790 * schedule off work item only when there are no
793 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
794 DRM_DEBUG_KMS("id:%d skip schedule work\n",
/* arm the delayed-off worker that leads to ENTER_IDLE */
799 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
800 msecs_to_jiffies(dpu_enc->idle_timeout));
802 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
803 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
807 case DPU_ENC_RC_EVENT_PRE_STOP:
808 /* cancel delayed off work, if any */
809 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
810 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
813 mutex_lock(&dpu_enc->rc_lock);
816 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
817 _dpu_encoder_irq_control(drm_enc, true);
819 /* skip if is already OFF or IDLE, resources are off already */
820 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
821 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
822 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
823 DRMID(drm_enc), sw_event,
825 mutex_unlock(&dpu_enc->rc_lock);
829 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
831 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
832 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
835 mutex_unlock(&dpu_enc->rc_lock);
838 case DPU_ENC_RC_EVENT_STOP:
839 mutex_lock(&dpu_enc->rc_lock);
841 /* return if the resource control is already in OFF state */
842 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
843 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
844 DRMID(drm_enc), sw_event);
845 mutex_unlock(&dpu_enc->rc_lock);
847 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
848 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
849 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
850 mutex_unlock(&dpu_enc->rc_lock);
855 * expect to arrive here only if in either idle state or pre-off
856 * and in IDLE state the resources are already disabled
858 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
859 _dpu_encoder_resource_control_helper(drm_enc, false);
861 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
863 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
864 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
867 mutex_unlock(&dpu_enc->rc_lock);
870 case DPU_ENC_RC_EVENT_ENTER_IDLE:
871 mutex_lock(&dpu_enc->rc_lock);
873 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
874 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
875 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
876 mutex_unlock(&dpu_enc->rc_lock);
881 * if we are in ON but a frame was just kicked off,
882 * ignore the IDLE event, it's probably a stale timer event
884 if (dpu_enc->frame_busy_mask[0]) {
885 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
886 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
887 mutex_unlock(&dpu_enc->rc_lock);
/* video mode keeps clocks running; only IRQs are dropped in IDLE */
892 _dpu_encoder_irq_control(drm_enc, false);
894 _dpu_encoder_resource_control_helper(drm_enc, false);
896 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
898 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
899 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
902 mutex_unlock(&dpu_enc->rc_lock);
906 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
908 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
909 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
914 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
915 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
/* Forward writeback-job preparation to every phys encoder that supports it. */
920 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
921 struct drm_writeback_job *job)
923 struct dpu_encoder_virt *dpu_enc;
926 dpu_enc = to_dpu_encoder_virt(drm_enc);
928 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
929 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
931 if (phys->ops.prepare_wb_job)
932 phys->ops.prepare_wb_job(phys, job);
/* Forward writeback-job cleanup to every phys encoder that supports it. */
937 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
938 struct drm_writeback_job *job)
940 struct dpu_encoder_virt *dpu_enc;
943 dpu_enc = to_dpu_encoder_virt(drm_enc);
945 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
946 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
948 if (phys->ops.cleanup_wb_job)
949 phys->ops.cleanup_wb_job(phys, job);
/* drm_encoder_helper_funcs::atomic_mode_set - fetch the hw blocks reserved
 * during atomic_check (pingpong, ctl, lm, dspp, dsc) from global state,
 * distribute them to the CRTC mixer state and each physical encoder, and
 * cache the adjusted mode on each phys.
 */
954 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
955 struct drm_crtc_state *crtc_state,
956 struct drm_connector_state *conn_state)
958 struct dpu_encoder_virt *dpu_enc;
959 struct msm_drm_private *priv;
960 struct dpu_kms *dpu_kms;
961 struct dpu_crtc_state *cstate;
962 struct dpu_global_state *global_state;
963 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
964 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
965 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
966 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
967 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
968 int num_lm, num_ctl, num_pp, num_dsc;
969 unsigned int dsc_mask = 0;
973 DPU_ERROR("invalid encoder\n");
977 dpu_enc = to_dpu_encoder_virt(drm_enc);
978 DPU_DEBUG_ENC(dpu_enc, "\n");
980 priv = drm_enc->dev->dev_private;
981 dpu_kms = to_dpu_kms(priv->kms);
983 global_state = dpu_kms_get_existing_global_state(dpu_kms);
984 if (IS_ERR_OR_NULL(global_state)) {
985 DPU_ERROR("Failed to get global state");
989 trace_dpu_enc_mode_set(DRMID(drm_enc));
991 /* Query resource that have been reserved in atomic check step. */
992 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
993 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
995 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
996 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
997 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
998 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
999 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1000 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1001 ARRAY_SIZE(hw_dspp));
1003 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1004 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1008 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1009 drm_enc->base.id, DPU_HW_BLK_DSC,
1010 hw_dsc, ARRAY_SIZE(hw_dsc));
1011 for (i = 0; i < num_dsc; i++) {
1012 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
1013 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
1017 dpu_enc->dsc_mask = dsc_mask;
1019 cstate = to_dpu_crtc_state(crtc_state);
1021 for (i = 0; i < num_lm; i++) {
/* reuse the last CTL when there are fewer CTLs than LMs */
1022 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1024 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1025 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1026 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1029 cstate->num_mixers = num_lm;
1031 dpu_enc->connector = conn_state->connector;
1033 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1034 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1036 if (!dpu_enc->hw_pp[i]) {
1037 DPU_ERROR_ENC(dpu_enc,
1038 "no pp block assigned at idx: %d\n", i);
1043 DPU_ERROR_ENC(dpu_enc,
1044 "no ctl block assigned at idx: %d\n", i);
1048 phys->hw_pp = dpu_enc->hw_pp[i];
1049 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1051 if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
1052 phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);
1054 if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
1055 phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);
/* each phys must be exactly one of: interface-backed or writeback-backed */
1057 if (!phys->hw_intf && !phys->hw_wb) {
1058 DPU_ERROR_ENC(dpu_enc,
1059 "no intf or wb block assigned at idx: %d\n", i);
1063 if (phys->hw_intf && phys->hw_wb) {
1064 DPU_ERROR_ENC(dpu_enc,
1065 "invalid phys both intf and wb block at idx: %d\n", i);
1069 phys->cached_mode = crtc_state->adjusted_mode;
1070 if (phys->ops.atomic_mode_set)
1071 phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
/* Common enable path (shared with runtime resume): select HDMI audio intf
 * when applicable, program the vsync source, and set up dithering on each
 * pingpong for DSI displays.
 */
1075 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1077 struct dpu_encoder_virt *dpu_enc = NULL;
1080 if (!drm_enc || !drm_enc->dev) {
1081 DPU_ERROR("invalid parameters\n");
1085 dpu_enc = to_dpu_encoder_virt(drm_enc);
1086 if (!dpu_enc || !dpu_enc->cur_master) {
1087 DPU_ERROR("invalid dpu encoder/master\n");
/* TMDS (HDMI/DP-style) encoders route audio through MDP top */
1092 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
1093 dpu_enc->cur_master->hw_mdptop &&
1094 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1095 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1096 dpu_enc->cur_master->hw_mdptop);
1098 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1100 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1101 !WARN_ON(dpu_enc->num_phys_encs == 0)) {
1102 unsigned bpc = dpu_enc->connector->display_info.bpc;
1103 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1104 if (!dpu_enc->hw_pp[i])
1106 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
/* Runtime-PM resume hook: restore slave then master phys encoder state and
 * re-run the common enable helper, but only if the encoder was enabled.
 */
1111 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1113 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1115 mutex_lock(&dpu_enc->enc_lock);
1117 if (!dpu_enc->enabled)
/* restore slave first, mirroring the enable ordering */
1120 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1121 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1122 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1123 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1125 _dpu_encoder_virt_enable_helper(drm_enc);
1128 mutex_unlock(&dpu_enc->enc_lock);
/* drm_encoder_helper_funcs::enable - enable slave then master phys
 * encoders, kick resource control ON (KICKOFF event), and run the common
 * enable helper. Marks the virtual encoder enabled under enc_lock.
 */
1131 static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1133 struct dpu_encoder_virt *dpu_enc = NULL;
1135 struct drm_display_mode *cur_mode = NULL;
1137 dpu_enc = to_dpu_encoder_virt(drm_enc);
1139 mutex_lock(&dpu_enc->enc_lock);
1140 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1142 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1143 cur_mode->vdisplay);
1145 /* always enable slave encoder before master */
1146 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1147 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1149 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1150 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1152 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1154 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1159 _dpu_encoder_virt_enable_helper(drm_enc);
1161 dpu_enc->enabled = true;
1164 mutex_unlock(&dpu_enc->enc_lock);
/*
 * dpu_encoder_virt_disable - drm_encoder_helper_funcs .disable hook.
 * Waits for the current frame to finish (TX_COMPLETE), walks resource control
 * through PRE_STOP/STOP, disables every phys encoder and cancels any pending
 * frame-done timeout before dropping the connector reference.
 */
1167 static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1169 struct dpu_encoder_virt *dpu_enc = NULL;
1172 dpu_enc = to_dpu_encoder_virt(drm_enc);
1173 DPU_DEBUG_ENC(dpu_enc, "\n");
1175 mutex_lock(&dpu_enc->enc_lock);
/* Mark disabled early so concurrent runtime-resume becomes a no-op. */
1176 dpu_enc->enabled = false;
1178 trace_dpu_enc_disable(DRMID(drm_enc));
1181 dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1183 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1185 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1186 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1188 if (phys->ops.disable)
1189 phys->ops.disable(phys);
1193 /* after phys waits for frame-done, should be no more frames pending */
1194 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1195 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1196 del_timer_sync(&dpu_enc->frame_done_timer);
1199 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1201 dpu_enc->connector = NULL;
1203 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1205 mutex_unlock(&dpu_enc->enc_lock);
1208 static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
1209 enum dpu_intf_type type, u32 controller_id)
1213 if (type != INTF_WB) {
1214 for (i = 0; i < catalog->intf_count; i++) {
1215 if (catalog->intf[i].type == type
1216 && catalog->intf[i].controller_id == controller_id) {
1217 return catalog->intf[i].id;
1225 static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
1226 enum dpu_intf_type type, u32 controller_id)
1230 if (type != INTF_WB)
1233 for (i = 0; i < catalog->wb_count; i++) {
1234 if (catalog->wb[i].id == controller_id)
1235 return catalog->wb[i].id;
/*
 * dpu_encoder_vblank_callback - per-phys vblank IRQ notification.
 * Bumps the phys encoder's vsync counter and forwards the event to the
 * attached CRTC under enc_spinlock (so it cannot race crtc assignment).
 */
1242 static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1243 struct dpu_encoder_phys *phy_enc)
1245 struct dpu_encoder_virt *dpu_enc = NULL;
1246 unsigned long lock_flags;
1248 if (!drm_enc || !phy_enc)
1251 DPU_ATRACE_BEGIN("encoder_vblank_callback");
1252 dpu_enc = to_dpu_encoder_virt(drm_enc);
1254 atomic_inc(&phy_enc->vsync_cnt);
1256 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1258 dpu_crtc_vblank_callback(dpu_enc->crtc);
1259 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1261 DPU_ATRACE_END("encoder_vblank_callback");
/*
 * dpu_encoder_underrun_callback - per-phys underrun IRQ notification.
 * Counts underruns and captures a full display snapshot on the first one
 * only, to avoid flooding the snapshot mechanism on a persistent underrun.
 */
1264 static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1265 struct dpu_encoder_phys *phy_enc)
1270 DPU_ATRACE_BEGIN("encoder_underrun_callback");
1271 atomic_inc(&phy_enc->underrun_cnt);
1273 /* trigger dump only on the first underrun */
1274 if (atomic_read(&phy_enc->underrun_cnt) == 1)
1275 msm_disp_snapshot_state(drm_enc->dev);
1277 trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1278 atomic_read(&phy_enc->underrun_cnt));
1279 DPU_ATRACE_END("encoder_underrun_callback");
/*
 * dpu_encoder_assign_crtc - bind (or clear, with crtc == NULL) the CRTC that
 * receives this encoder's vblank callbacks. Serialized against the vblank
 * IRQ path by enc_spinlock.
 */
1282 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1284 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1285 unsigned long lock_flags;
1287 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1288 /* crtc should always be cleared before re-assigning */
1289 WARN_ON(crtc && dpu_enc->crtc);
1290 dpu_enc->crtc = crtc;
1291 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
/*
 * dpu_encoder_toggle_vblank_for_crtc - enable/disable vblank IRQs on every
 * phys encoder, but only when this encoder is currently bound to @crtc.
 * The crtc check is done under enc_spinlock; the per-phys IRQ toggling is
 * done outside the lock.
 */
1294 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1295 struct drm_crtc *crtc, bool enable)
1297 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1298 unsigned long lock_flags;
1301 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1303 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
/* Not our crtc: drop the lock and bail (early return in original). */
1304 if (dpu_enc->crtc != crtc) {
1305 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1308 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1310 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1311 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1313 if (phys->ops.control_vblank_irq)
1314 phys->ops.control_vblank_irq(phys, enable);
/*
 * dpu_encoder_register_frame_event_callback - install (or clear, when
 * @frame_event_cb is NULL) the CRTC frame-event callback and its cookie.
 * Updated under enc_spinlock so the frame-done IRQ path sees a consistent
 * callback/data pair.
 */
1318 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1319 void (*frame_event_cb)(void *, u32 event),
1320 void *frame_event_cb_data)
1322 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1323 unsigned long lock_flags;
1326 enable = frame_event_cb ? true : false;
1329 DPU_ERROR("invalid encoder\n");
1332 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1334 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1335 dpu_enc->crtc_frame_event_cb = frame_event_cb;
1336 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1337 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
/*
 * dpu_encoder_frame_done_callback - aggregate per-phys frame-done events.
 * For DONE/ERROR/PANEL_DEAD events, clears the reporting phys encoder's bit
 * in frame_busy_mask; only when the mask drains to zero does it cancel the
 * frame-done timeout, fire the FRAME_DONE resource-control event and invoke
 * the CRTC callback. All other events are forwarded to the CRTC directly.
 */
1340 static void dpu_encoder_frame_done_callback(
1341 struct drm_encoder *drm_enc,
1342 struct dpu_encoder_phys *ready_phys, u32 event)
1344 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1347 if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1348 | DPU_ENCODER_FRAME_EVENT_ERROR
1349 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1351 if (!dpu_enc->frame_busy_mask[0]) {
1353 * suppress frame_done without waiter,
1354 * likely autorefresh
1356 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
1357 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
1358 ready_phys->intf_idx, ready_phys->wb_idx);
1362 /* One of the physical encoders has become idle */
1363 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1364 if (dpu_enc->phys_encs[i] == ready_phys) {
1365 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1366 dpu_enc->frame_busy_mask[0]);
1367 clear_bit(i, dpu_enc->frame_busy_mask);
/* Last busy phys just finished: the whole frame is done. */
1371 if (!dpu_enc->frame_busy_mask[0]) {
1372 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1373 del_timer(&dpu_enc->frame_done_timer);
1375 dpu_encoder_resource_control(drm_enc,
1376 DPU_ENC_RC_EVENT_FRAME_DONE);
1378 if (dpu_enc->crtc_frame_event_cb)
1379 dpu_enc->crtc_frame_event_cb(
1380 dpu_enc->crtc_frame_event_cb_data,
/* Non frame-done events (e.g. IDLE) pass straight through. */
1384 if (dpu_enc->crtc_frame_event_cb)
1385 dpu_enc->crtc_frame_event_cb(
1386 dpu_enc->crtc_frame_event_cb_data, event);
/*
 * dpu_encoder_off_work - delayed work that drives the encoder into idle
 * power-collapse (ENTER_IDLE) after inactivity, then reports an IDLE frame
 * event so upper layers see the transition.
 */
1390 static void dpu_encoder_off_work(struct work_struct *work)
1392 struct dpu_encoder_virt *dpu_enc = container_of(work,
1393 struct dpu_encoder_virt, delayed_off_work.work);
1395 dpu_encoder_resource_control(&dpu_enc->base,
1396 DPU_ENC_RC_EVENT_ENTER_IDLE);
1398 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1399 DPU_ENCODER_FRAME_EVENT_IDLE);
* _dpu_encoder_trigger_flush - trigger flush for a physical encoder
* @drm_enc: Pointer to drm encoder structure
* @phys: Pointer to physical encoder structure
* @extra_flush_bits: Additional bit mask to include in flush trigger
/*
 * Increments the phys encoder's pending-kickoff count, merges any extra
 * flush bits into the CTL's pending-flush mask, and writes the flush
 * trigger. The resulting pending mask is only read back for tracing.
 */
1408 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1409 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1411 struct dpu_hw_ctl *ctl;
1412 int pending_kickoff_cnt;
1416 DPU_ERROR("invalid pingpong hw\n");
1421 if (!ctl->ops.trigger_flush) {
1422 DPU_ERROR("missing trigger cb\n");
1426 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1428 if (extra_flush_bits && ctl->ops.update_pending_flush)
1429 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1431 ctl->ops.trigger_flush(ctl);
1433 if (ctl->ops.get_pending_flush)
1434 ret = ctl->ops.get_pending_flush(ctl);
1436 trace_dpu_enc_trigger_flush(DRMID(drm_enc),
1437 dpu_encoder_helper_get_intf_type(phys->intf_mode),
1438 phys->intf_idx, phys->wb_idx,
1439 pending_kickoff_cnt, ctl->idx,
1440 extra_flush_bits, ret);
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* @phys: Pointer to physical encoder structure
/* No-op for disabled phys encoders; otherwise delegates to the phys op. */
1447 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1450 DPU_ERROR("invalid argument(s)\n");
1455 DPU_ERROR("invalid pingpong hw\n");
1459 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1460 phys->ops.trigger_start(phys);
/*
 * dpu_encoder_helper_trigger_start - write the CTL start trigger for a phys
 * encoder (if the CTL block implements trigger_start) and trace it.
 */
1463 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1465 struct dpu_hw_ctl *ctl;
1467 ctl = phys_enc->hw_ctl;
1468 if (ctl->ops.trigger_start) {
1469 ctl->ops.trigger_start(ctl);
1470 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
/*
 * dpu_encoder_helper_wait_event_timeout - wait for info->atomic_cnt to reach
 * zero, retrying spurious wakeups/timeouts until the absolute deadline
 * (now + info->timeout_ms) has passed. Returns the last wait_event_timeout
 * result (0 on timeout, >0 on success) to the caller.
 */
1474 static int dpu_encoder_helper_wait_event_timeout(
1477 struct dpu_encoder_wait_info *info)
1480 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1481 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1485 rc = wait_event_timeout(*(info->wq),
1486 atomic_read(info->atomic_cnt) == 0, jiffies);
1487 time = ktime_to_ms(ktime_get());
1489 trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
1491 atomic_read(info->atomic_cnt));
1492 /* If we timed out, counter is valid and time is less, wait again */
1493 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1494 (time < expected_time));
/*
 * dpu_encoder_helper_hw_reset - reset a phys encoder's CTL block after an
 * error. Captures a display snapshot if the reset itself fails, then marks
 * the phys encoder enabled again so normal operation can resume.
 */
1499 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1501 struct dpu_encoder_virt *dpu_enc;
1502 struct dpu_hw_ctl *ctl;
1504 struct drm_encoder *drm_enc;
1506 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1507 ctl = phys_enc->hw_ctl;
1508 drm_enc = phys_enc->parent;
1510 if (!ctl->ops.reset)
1513 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1516 rc = ctl->ops.reset(ctl);
1518 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1519 msm_disp_snapshot_state(drm_enc->dev);
1522 phys_enc->enable_state = DPU_ENC_ENABLED;
* _dpu_encoder_kickoff_phys - handle physical encoder kickoff
* Iterate through the physical encoders and perform consolidated flush
* and/or control start triggering as needed. This is done in the virtual
* encoder rather than the individual physical ones in order to handle
* use cases that require visibility into multiple physical encoders at
* @dpu_enc: Pointer to virtual encoder structure
1534 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1536 struct dpu_hw_ctl *ctl;
1537 uint32_t i, pending_flush;
1538 unsigned long lock_flags;
1540 pending_flush = 0x0;
1542 /* update pending counts and trigger kickoff ctl flush atomically */
1543 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1545 /* don't perform flush/start operations for slave encoders */
1546 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1547 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1549 if (phys->enable_state == DPU_ENC_DISABLED)
1555 * This is cleared in frame_done worker, which isn't invoked
1556 * for async commits. So don't set this for async, since it'll
1557 * roll over to the next commit.
1559 if (phys->split_role != ENC_ROLE_SLAVE)
1560 set_bit(i, dpu_enc->frame_busy_mask)
1562 if (!phys->ops.needs_single_flush ||
1563 !phys->ops.needs_single_flush(phys))
1564 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
/* Single-flush phys: accumulate its mask for the master to flush. */
1565 else if (ctl->ops.get_pending_flush)
1566 pending_flush |= ctl->ops.get_pending_flush(ctl);
1569 /* for split flush, combine pending flush masks and send to master */
1570 if (pending_flush && dpu_enc->cur_master) {
1571 _dpu_encoder_trigger_flush(
1573 dpu_enc->cur_master,
1577 _dpu_encoder_trigger_start(dpu_enc->cur_master);
1579 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
/*
 * dpu_encoder_trigger_kickoff_pending - clear each phys encoder's pending
 * CTL flush mask, and additionally arm trigger_pending on the master CTL
 * for command-mode displays.
 */
1582 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1584 struct dpu_encoder_virt *dpu_enc;
1585 struct dpu_encoder_phys *phys;
1587 struct dpu_hw_ctl *ctl;
1588 struct msm_display_info *disp_info;
1591 DPU_ERROR("invalid encoder\n");
1594 dpu_enc = to_dpu_encoder_virt(drm_enc);
1595 disp_info = &dpu_enc->disp_info;
1597 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1598 phys = dpu_enc->phys_encs[i];
1601 if (ctl->ops.clear_pending_flush)
1602 ctl->ops.clear_pending_flush(ctl);
1604 /* update only for command mode primary ctl */
1605 if ((phys == dpu_enc->cur_master) &&
1606 (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
1607 && ctl->ops.trigger_pending)
1608 ctl->ops.trigger_pending(ctl);
/*
 * _dpu_encoder_calculate_linetime - compute one scanline's duration in ns
 * from the mode's pixel clock (kHz) and htotal. Returns 0 (implied by the
 * error paths) when the master encoder, pixel clock, or derived values are
 * unusable.
 */
1612 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1613 struct drm_display_mode *mode)
1620 * For linetime calculation, only operate on master encoder.
1622 if (!dpu_enc->cur_master)
1625 if (!dpu_enc->cur_master->ops.get_line_count) {
1626 DPU_ERROR("get_line_count function not defined\n");
1630 pclk_rate = mode->clock; /* pixel clock in kHz */
1631 if (pclk_rate == 0) {
1632 DPU_ERROR("pclk is 0, cannot calculate line time\n");
/* Pixel period in picoseconds: 1e9 / rate-in-kHz. */
1636 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1637 if (pclk_period == 0) {
1638 DPU_ERROR("pclk period is 0\n");
1643 * Line time calculation based on Pixel clock and HTOTAL.
1644 * Final unit is in ns.
1646 line_time = (pclk_period * mode->htotal) / 1000;
1647 if (line_time == 0) {
1648 DPU_ERROR("line time calculation is 0\n");
1652 DPU_DEBUG_ENC(dpu_enc,
1653 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1654 pclk_rate, pclk_period, line_time);
/*
 * dpu_encoder_vsync_time - estimate the ktime of the next vsync.
 * Reads the current scanline from the master phys encoder, multiplies the
 * remaining lines by the per-line time, and stores now + that delta in
 * @wakeup_time. Returns non-zero (implied by the error paths) on failure.
 */
1659 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1661 struct drm_display_mode *mode;
1662 struct dpu_encoder_virt *dpu_enc;
1665 u32 vtotal, time_to_vsync;
1668 dpu_enc = to_dpu_encoder_virt(drm_enc);
1670 if (!drm_enc->crtc || !drm_enc->crtc->state) {
1671 DPU_ERROR("crtc/crtc state object is NULL\n");
1674 mode = &drm_enc->crtc->state->adjusted_mode;
1676 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1680 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1682 vtotal = mode->vtotal;
/* Past the last line: wait a whole additional frame. */
1683 if (cur_line >= vtotal)
1684 time_to_vsync = line_time * vtotal;
1686 time_to_vsync = line_time * (vtotal - cur_line);
1688 if (time_to_vsync == 0) {
1689 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1694 cur_time = ktime_get();
1695 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1697 DPU_DEBUG_ENC(dpu_enc,
1698 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1699 cur_line, vtotal, time_to_vsync,
1700 ktime_to_ms(cur_time),
1701 ktime_to_ms(*wakeup_time));
/*
 * dpu_encoder_vsync_event_handler - vsync_event_timer callback.
 * Validates device/crtc/event-thread state; the visible tail only deletes
 * the timer (the work-queueing step is not visible in this extract —
 * NOTE(review): confirm against full source).
 */
1705 static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1707 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1709 struct drm_encoder *drm_enc = &dpu_enc->base;
1710 struct msm_drm_private *priv;
1711 struct msm_drm_thread *event_thread;
1713 if (!drm_enc->dev || !drm_enc->crtc) {
1714 DPU_ERROR("invalid parameters\n");
1718 priv = drm_enc->dev->dev_private;
1720 if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1721 DPU_ERROR("invalid crtc index\n");
1724 event_thread = &priv->event_thread[drm_enc->crtc->index];
1725 if (!event_thread) {
1726 DPU_ERROR("event_thread not found for crtc:%d\n",
1727 drm_enc->crtc->index);
1731 del_timer(&dpu_enc->vsync_event_timer);
/*
 * dpu_encoder_vsync_event_work_handler - kthread work that re-arms the
 * vsync_event_timer for the next estimated vsync instant. Bails silently
 * if the next vsync time cannot be computed.
 */
1734 static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1736 struct dpu_encoder_virt *dpu_enc = container_of(work,
1737 struct dpu_encoder_virt, vsync_event_work);
1738 ktime_t wakeup_time;
1740 if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1743 trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1744 mod_timer(&dpu_enc->vsync_event_timer,
1745 nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1749 dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
1752 int ssm_delay, total_pixels, soft_slice_per_enc;
1754 soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
1757 * minimum number of initial line pixels is a sum of:
1758 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
1759 * 91 for 10 bpc) * 3
1760 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
1761 * 3. the initial xmit delay
1762 * 4. total pipeline delay through the "lock step" of encoder (47)
1763 * 5. 6 additional pixels as the output of the rate buffer is
1766 ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
1767 total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
1768 if (soft_slice_per_enc > 1)
1769 total_pixels += (ssm_delay * 3);
1770 return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
/*
 * dpu_encoder_dsc_pipe_cfg - program one DSC hardware block and attach it
 * through its pingpong: DSC core config and thresholds first, then the
 * pingpong-side setup/enable. Each step is guarded on the op existing.
 */
1773 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
1774 struct dpu_hw_pingpong *hw_pp,
1775 struct msm_display_dsc_config *dsc,
1779 if (hw_dsc->ops.dsc_config)
1780 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
1782 if (hw_dsc->ops.dsc_config_thresh)
1783 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
1785 if (hw_pp->ops.setup_dsc)
1786 hw_pp->ops.setup_dsc(hw_pp);
1788 if (hw_pp->ops.enable_dsc)
1789 hw_pp->ops.enable_dsc(hw_pp);
/*
 * dpu_encoder_prep_dsc - configure DSC for the fixed 2-mixer/2-encoder/1-DSC
 * topology: validates both pingpong+DSC pairs, derives the slice split
 * (each encoder takes half the interface width in DSC-merge mode), then
 * programs both DSC pipes identically.
 */
1792 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
1793 struct msm_display_dsc_config *dsc)
1795 /* coding only for 2LM, 2enc, 1 dsc config */
1796 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
1797 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
1798 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
1799 int this_frame_slices;
1800 int intf_ip_w, enc_ip_w;
1801 int dsc_common_mode;
1806 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1807 hw_pp[i] = dpu_enc->hw_pp[i];
1808 hw_dsc[i] = dpu_enc->hw_dsc[i];
1810 if (!hw_pp[i] || !hw_dsc[i]) {
1811 DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
1816 pic_width = dsc->drm->pic_width;
1818 dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
1819 if (enc_master->intf_mode == INTF_MODE_VIDEO)
1820 dsc_common_mode |= DSC_MODE_VIDEO;
1822 this_frame_slices = pic_width / dsc->drm->slice_width;
1823 intf_ip_w = this_frame_slices * dsc->drm->slice_width;
1826 * dsc merge case: when using 2 encoders for the same stream,
1827 * no. of slices need to be same on both the encoders.
1829 enc_ip_w = intf_ip_w / 2;
1830 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
1832 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1833 dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
/*
 * dpu_encoder_prepare_for_kickoff - per-commit preparation before kickoff.
 * Runs each phys encoder's prepare_for_kickoff (which may block on the
 * previous frame), requests resources, resets ALL phys encoders if any one
 * flagged ERR_NEEDS_HW_RESET, and finally programs DSC when present.
 */
1836 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1838 struct dpu_encoder_virt *dpu_enc;
1839 struct dpu_encoder_phys *phys;
1840 bool needs_hw_reset = false;
1843 dpu_enc = to_dpu_encoder_virt(drm_enc);
1845 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1847 /* prepare for next kickoff, may include waiting on previous kickoff */
1848 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1849 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1850 phys = dpu_enc->phys_encs[i];
1851 if (phys->ops.prepare_for_kickoff)
1852 phys->ops.prepare_for_kickoff(phys);
1853 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1854 needs_hw_reset = true;
1856 DPU_ATRACE_END("enc_prepare_for_kickoff");
1858 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1860 /* if any phys needs reset, reset all phys, in-order */
1861 if (needs_hw_reset) {
1862 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1863 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1864 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1869 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
/*
 * dpu_encoder_is_valid_for_commit - ask each phys encoder of a virtual
 * (writeback) encoder whether the commit may proceed; any phys returning
 * invalid (e.g. no output FB) vetoes the kickoff.
 */
1872 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
1874 struct dpu_encoder_virt *dpu_enc;
1876 struct dpu_encoder_phys *phys;
1878 dpu_enc = to_dpu_encoder_virt(drm_enc);
1880 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
1881 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1882 phys = dpu_enc->phys_encs[i];
1883 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
1884 DPU_DEBUG("invalid FB not kicking off\n");
/*
 * dpu_encoder_kickoff - trigger the actual frame transfer.
 * Arms the frame-done watchdog (FRAME_DONE_TIMEOUT_FRAMES worth of frames at
 * the current refresh rate), performs the consolidated phys flush/start,
 * lets each phys run post-kickoff work, and for DSI re-arms the vsync timer.
 */
1893 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1895 struct dpu_encoder_virt *dpu_enc;
1896 struct dpu_encoder_phys *phys;
1897 ktime_t wakeup_time;
1898 unsigned long timeout_ms;
1901 DPU_ATRACE_BEGIN("encoder_kickoff");
1902 dpu_enc = to_dpu_encoder_virt(drm_enc);
1904 trace_dpu_enc_kickoff(DRMID(drm_enc));
/* Timeout scales with refresh rate: N frames expressed in ms. */
1906 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1907 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1909 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1910 mod_timer(&dpu_enc->frame_done_timer,
1911 jiffies + msecs_to_jiffies(timeout_ms));
1913 /* All phys encs are ready to go, trigger the kickoff */
1914 _dpu_encoder_kickoff_phys(dpu_enc);
1916 /* allow phys encs to handle any post-kickoff business */
1917 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1918 phys = dpu_enc->phys_encs[i];
1919 if (phys->ops.handle_post_kickoff)
1920 phys->ops.handle_post_kickoff(phys);
1923 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1924 !dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
1925 trace_dpu_enc_early_kickoff(DRMID(drm_enc),
1926 ktime_to_ms(wakeup_time));
1927 mod_timer(&dpu_enc->vsync_event_timer,
1928 nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1931 DPU_ATRACE_END("encoder_kickoff");
/*
 * dpu_encoder_helper_reset_mixers - clear every layer mixer assigned to this
 * encoder: wipe all blend stages in CTL, then for each assigned LM mark its
 * flush bit pending and program an empty (NULL) blend stage.
 */
1934 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
1936 struct dpu_hw_mixer_cfg mixer;
1939 struct dpu_global_state *global_state;
1940 struct dpu_hw_blk *hw_lm[2];
1941 struct dpu_hw_mixer *hw_mixer[2];
1942 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
1944 memset(&mixer, 0, sizeof(mixer));
1946 /* reset all mixers for this encoder */
1947 if (phys_enc->hw_ctl->ops.clear_all_blendstages)
1948 phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
1950 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
1952 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
1953 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1955 for (i = 0; i < num_lm; i++) {
1956 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
1957 flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
1958 if (phys_enc->hw_ctl->ops.update_pending_flush)
1959 phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);
1961 /* clear all blendstages */
1962 if (phys_enc->hw_ctl->ops.setup_blendstage)
1963 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
/*
 * dpu_encoder_helper_phys_cleanup - tear down datapath state for a phys
 * encoder on disable: reset CTL and mixers, unbind WB or INTF blocks from
 * their pingpongs (flagging the matching flush bits), reset merge-3D and
 * interface config, then flush+start+clear so the HW latches the teardown.
 */
1967 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
1969 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
1970 struct dpu_hw_intf_cfg intf_cfg = { 0 };
1972 struct dpu_encoder_virt *dpu_enc;
1974 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1976 phys_enc->hw_ctl->ops.reset(ctl);
1978 dpu_encoder_helper_reset_mixers(phys_enc);
1981 * TODO: move the once-only operation like CTL flush/trigger
1982 * into dpu_encoder_virt_disable() and all operations which need
1983 * to be done per phys encoder into the phys_disable() op.
1985 if (phys_enc->hw_wb) {
1986 /* disable the PP block */
1987 if (phys_enc->hw_wb->ops.bind_pingpong_blk)
1988 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
1989 phys_enc->hw_pp->idx);
1991 /* mark WB flush as pending */
1992 if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
1993 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
/* Non-WB path: detach every phys encoder's INTF from its pingpong. */
1995 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1996 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
1997 phys_enc->hw_intf->ops.bind_pingpong_blk(
1998 dpu_enc->phys_encs[i]->hw_intf, false,
1999 dpu_enc->phys_encs[i]->hw_pp->idx);
2001 /* mark INTF flush as pending */
2002 if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
2003 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
2004 dpu_enc->phys_encs[i]->hw_intf->idx);
2008 /* reset the merge 3D HW block */
2009 if (phys_enc->hw_pp->merge_3d) {
2010 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
2012 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
2013 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
2014 phys_enc->hw_pp->merge_3d->idx);
2017 intf_cfg.stream_sel = 0; /* Don't care value for video mode */
2018 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
2019 if (phys_enc->hw_pp->merge_3d)
2020 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
2022 if (ctl->ops.reset_intf_cfg)
2023 ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
/* Latch the teardown into hardware, then clear the pending mask. */
2025 ctl->ops.trigger_flush(ctl);
2026 ctl->ops.trigger_start(ctl);
2027 ctl->ops.clear_pending_flush(ctl);
/*
 * dpu_encoder_prepare_commit - fan the prepare_commit hook out to every
 * phys encoder before an atomic commit touches the hardware.
 */
2030 void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
2032 struct dpu_encoder_virt *dpu_enc;
2033 struct dpu_encoder_phys *phys;
2037 DPU_ERROR("invalid encoder\n");
2040 dpu_enc = to_dpu_encoder_virt(drm_enc);
2042 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2043 phys = dpu_enc->phys_encs[i];
2044 if (phys->ops.prepare_commit)
2045 phys->ops.prepare_commit(phys);
2049 #ifdef CONFIG_DEBUG_FS
/*
 * _dpu_encoder_status_show - debugfs "status" file: one line per phys
 * encoder with its intf/wb index, vsync and underrun counters, and mode.
 */
2050 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
2052 struct dpu_encoder_virt *dpu_enc = s->private;
2055 mutex_lock(&dpu_enc->enc_lock);
2056 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2057 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2059 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
2060 phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
2061 atomic_read(&phys->vsync_cnt),
2062 atomic_read(&phys->underrun_cnt));
2064 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
2066 mutex_unlock(&dpu_enc->enc_lock);
/* Generates _dpu_encoder_status_fops for the debugfs file below. */
2071 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
/*
 * _dpu_encoder_init_debugfs - create the per-encoder debugfs directory
 * ("encoderN"), its "status" file, and let each phys encoder register its
 * own entries via late_register.
 */
2073 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2075 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
2078 char name[DPU_NAME_SIZE];
2080 if (!drm_enc->dev) {
2081 DPU_ERROR("invalid encoder or kms\n");
2085 snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
2087 /* create overall sub-directory for the encoder */
2088 dpu_enc->debugfs_root = debugfs_create_dir(name,
2089 drm_enc->dev->primary->debugfs_root);
2091 /* don't error check these */
2092 debugfs_create_file("status", 0600,
2093 dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
2095 for (i = 0; i < dpu_enc->num_phys_encs; i++)
2096 if (dpu_enc->phys_encs[i]->ops.late_register)
2097 dpu_enc->phys_encs[i]->ops.late_register(
2098 dpu_enc->phys_encs[i],
2099 dpu_enc->debugfs_root);
/* !CONFIG_DEBUG_FS stub: no debugfs entries to create. */
2104 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
/* drm_encoder_funcs .late_register: hook debugfs creation into DRM. */
2110 static int dpu_encoder_late_register(struct drm_encoder *encoder)
2112 return _dpu_encoder_init_debugfs(encoder);
/* drm_encoder_funcs .early_unregister: tear down the debugfs subtree. */
2115 static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
2117 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2119 debugfs_remove_recursive(dpu_enc->debugfs_root);
/*
 * dpu_encoder_virt_add_phys_encs - instantiate phys encoders for one h-tile.
 * Depending on the display capabilities, creates a video-mode and/or
 * command-mode phys encoder, or a writeback one for virtual encoders, and
 * records the last-created encoder as cur_master or cur_slave per the
 * tile's split role. Returns 0 on success or a negative errno.
 */
2122 static int dpu_encoder_virt_add_phys_encs(
2123 struct msm_display_info *disp_info,
2124 struct dpu_encoder_virt *dpu_enc,
2125 struct dpu_enc_phys_init_params *params)
2127 struct dpu_encoder_phys *enc = NULL;
2129 DPU_DEBUG_ENC(dpu_enc, "\n");
2132 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
2133 * in this function, check up-front.
2135 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
2136 ARRAY_SIZE(dpu_enc->phys_encs)) {
2137 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
2138 dpu_enc->num_phys_encs);
2142 if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
2143 enc = dpu_encoder_phys_vid_init(params);
2145 if (IS_ERR_OR_NULL(enc)) {
2146 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
2148 return enc == NULL ? -EINVAL : PTR_ERR(enc);
2151 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2152 ++dpu_enc->num_phys_encs;
2155 if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
2156 enc = dpu_encoder_phys_cmd_init(params);
2158 if (IS_ERR_OR_NULL(enc)) {
2159 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
2161 return enc == NULL ? -EINVAL : PTR_ERR(enc);
2164 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2165 ++dpu_enc->num_phys_encs;
2168 if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
2169 enc = dpu_encoder_phys_wb_init(params);
2171 if (IS_ERR_OR_NULL(enc)) {
2172 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
2174 return enc == NULL ? -EINVAL : PTR_ERR(enc);
2177 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2178 ++dpu_enc->num_phys_encs;
/* The most recently created encoder fills this tile's role slot. */
2181 if (params->split_role == ENC_ROLE_SLAVE)
2182 dpu_enc->cur_slave = enc;
2184 dpu_enc->cur_master = enc;
/* Callbacks phys encoders use to report IRQ events up to the virtual one. */
2189 static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
2190 .handle_vblank_virt = dpu_encoder_vblank_callback,
2191 .handle_underrun_virt = dpu_encoder_underrun_callback,
2192 .handle_frame_done = dpu_encoder_frame_done_callback,
/*
 * dpu_encoder_setup_display - build the virtual encoder from display info.
 * Maps the DRM encoder type to a DPU interface type, decides idle-PC
 * support, then for every horizontal tile resolves exactly one INTF or WB
 * block from the catalog (never both) and creates its phys encoders.
 * Returns 0 on success or a negative errno.
 */
2195 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2196 struct dpu_kms *dpu_kms,
2197 struct msm_display_info *disp_info)
2201 enum dpu_intf_type intf_type = INTF_NONE;
2202 struct dpu_enc_phys_init_params phys_params;
2205 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
2209 dpu_enc->cur_master = NULL;
2211 memset(&phys_params, 0, sizeof(phys_params));
2212 phys_params.dpu_kms = dpu_kms;
2213 phys_params.parent = &dpu_enc->base;
2214 phys_params.parent_ops = &dpu_encoder_parent_ops;
2215 phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2217 switch (disp_info->intf_type) {
2218 case DRM_MODE_ENCODER_DSI:
2219 intf_type = INTF_DSI;
2221 case DRM_MODE_ENCODER_TMDS:
2222 intf_type = INTF_DP;
2224 case DRM_MODE_ENCODER_VIRTUAL:
2225 intf_type = INTF_WB;
2229 WARN_ON(disp_info->num_of_h_tiles < 1);
2231 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2233 if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
2234 (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
2235 dpu_enc->idle_pc_supported =
2236 dpu_kms->catalog->caps->has_idle_pc;
2238 dpu_enc->dsc = disp_info->dsc;
2240 mutex_lock(&dpu_enc->enc_lock);
2241 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2243 * Left-most tile is at index 0, content is controller id
2244 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2245 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2247 u32 controller_id = disp_info->h_tile_instance[i];
2249 if (disp_info->num_of_h_tiles > 1) {
2251 phys_params.split_role = ENC_ROLE_MASTER;
2253 phys_params.split_role = ENC_ROLE_SLAVE;
2255 phys_params.split_role = ENC_ROLE_SOLO;
2258 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2259 i, controller_id, phys_params.split_role);
2261 phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
2265 phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
2266 intf_type, controller_id);
2268 * The phys_params might represent either an INTF or a WB unit, but not
2269 * both of them at the same time.
2271 if ((phys_params.intf_idx == INTF_MAX) &&
2272 (phys_params.wb_idx == WB_MAX)) {
2273 DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
2274 intf_type, controller_id);
2278 if ((phys_params.intf_idx != INTF_MAX) &&
2279 (phys_params.wb_idx != WB_MAX)) {
2280 DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
2281 intf_type, controller_id);
2286 ret = dpu_encoder_virt_add_phys_encs(disp_info,
2287 dpu_enc, &phys_params);
2289 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
/* Start all per-phys statistics counters from zero. */
2293 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2294 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2295 atomic_set(&phys->vsync_cnt, 0);
2296 atomic_set(&phys->underrun_cnt, 0);
2298 mutex_unlock(&dpu_enc->enc_lock);
/*
 * dpu_encoder_frame_done_timeout - frame_done_timer callback. If frames are
 * still marked busy and the timeout was genuinely armed (atomic_xchg
 * consumes it), reports a FRAME_EVENT_ERROR to the CRTC callback; otherwise
 * treats the expiry as stale and only logs it.
 */
2303 static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2305 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2307 struct drm_encoder *drm_enc = &dpu_enc->base;
2310 if (!drm_enc->dev) {
2311 DPU_ERROR("invalid parameters\n");
2315 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2316 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2317 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2319 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2320 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2324 DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
2326 event = DPU_ENCODER_FRAME_EVENT_ERROR;
2327 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2328 dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
/* KMS atomic helper hooks for the virtual encoder. */
2331 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2332 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
2333 .disable = dpu_encoder_virt_disable,
2334 .enable = dpu_encoder_virt_enable,
2335 .atomic_check = dpu_encoder_virt_atomic_check,
/* Core encoder lifecycle hooks (debugfs registration, destroy). */
2338 static const struct drm_encoder_funcs dpu_encoder_funcs = {
2339 .destroy = dpu_encoder_destroy,
2340 .late_register = dpu_encoder_late_register,
2341 .early_unregister = dpu_encoder_early_unregister,
/*
 * dpu_encoder_setup - second-stage initialization of a dpu encoder.
 * Resolves the phys encoders from display info, arms the frame-done timer,
 * sets up the DSI vsync timer or DP wide-bus flag depending on interface
 * type, and initializes the idle-off delayed work and vsync kthread work.
 * Returns 0 on success or a negative errno.
 */
2344 int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
2345 struct msm_display_info *disp_info)
2347 struct msm_drm_private *priv = dev->dev_private;
2348 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2349 struct drm_encoder *drm_enc = NULL;
2350 struct dpu_encoder_virt *dpu_enc = NULL;
2353 dpu_enc = to_dpu_encoder_virt(enc);
2355 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2359 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2360 timer_setup(&dpu_enc->frame_done_timer,
2361 dpu_encoder_frame_done_timeout, 0);
2363 if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
2364 timer_setup(&dpu_enc->vsync_event_timer,
2365 dpu_encoder_vsync_event_handler,
2367 else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
2368 dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
2369 priv->dp[disp_info->h_tile_instance[0]]);
2371 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2372 dpu_encoder_off_work);
2373 dpu_enc->idle_timeout = IDLE_TIMEOUT;
2375 kthread_init_work(&dpu_enc->vsync_event_work,
2376 dpu_encoder_vsync_event_work_handler);
2378 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2380 DPU_DEBUG_ENC(dpu_enc, "created\n");
2385 DPU_ERROR("failed to create encoder\n");
2387 dpu_encoder_destroy(drm_enc);
/*
 * dpu_encoder_init - allocate and register a virtual DPU encoder
 * @dev: drm device
 *
 * Allocates the dpu_encoder_virt with devm (freed automatically with the
 * device), registers its embedded drm_encoder with the DRM core, attaches
 * the atomic helper vtable and initializes the encoder's locks.  Display
 * binding happens later in dpu_encoder_setup().
 *
 * Return: pointer to the embedded drm_encoder, or ERR_PTR(-ENOMEM) on
 * allocation failure (the drm_encoder_init() failure path is partially
 * out of view -- confirm its return against the full source).
 */
2394 struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2397 	struct dpu_encoder_virt *dpu_enc = NULL;
2400 	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
2402 	return ERR_PTR(-ENOMEM);
2405 	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
2406 	drm_enc_mode, NULL);
/* drm_encoder_init failed: release the devm allocation eagerly. */
2408 	devm_kfree(dev->dev, dpu_enc);
2412 	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2414 	spin_lock_init(&dpu_enc->enc_spinlock);
2415 	dpu_enc->enabled = false;
2416 	mutex_init(&dpu_enc->enc_lock);
2417 	mutex_init(&dpu_enc->rc_lock);
2419 	return &dpu_enc->base;
/*
 * dpu_encoder_wait_for_event - block until an event completes on all
 * physical encoders
 * @drm_enc: encoder to wait on
 * @event: which completion to wait for (commit done, TX complete, vblank)
 *
 * For each physical encoder, selects the matching per-phys wait op and
 * invokes it inside an ATRACE span.  An unknown event value is an error.
 * Physical encoders that do not implement the selected op are presumably
 * skipped (the guard lines are not visible here -- confirm against the
 * full source).
 *
 * Return: 0 on success or a negative error from the phys wait op /
 * -EINVAL-style failure for a bad event (exact returns out of view).
 */
2422 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
2423 	enum msm_event_wait event)
2425 	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
2426 	struct dpu_encoder_virt *dpu_enc = NULL;
2430 	DPU_ERROR("invalid encoder\n");
2433 	dpu_enc = to_dpu_encoder_virt(drm_enc);
2434 	DPU_DEBUG_ENC(dpu_enc, "\n");
2436 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2437 	struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
/* Map the requested event onto the phys encoder's wait callback. */
2440 	case MSM_ENC_COMMIT_DONE:
2441 	fn_wait = phys->ops.wait_for_commit_done;
2443 	case MSM_ENC_TX_COMPLETE:
2444 	fn_wait = phys->ops.wait_for_tx_complete;
2446 	case MSM_ENC_VBLANK:
2447 	fn_wait = phys->ops.wait_for_vblank;
2450 	DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
/* Trace the potentially long wait for profiling. */
2456 	DPU_ATRACE_BEGIN("wait_for_completion_event");
2457 	ret = fn_wait(phys);
2458 	DPU_ATRACE_END("wait_for_completion_event");
/*
 * dpu_encoder_get_intf_mode - report the interface mode in use
 * @encoder: encoder to query (may be NULL)
 *
 * Prefers the current master physical encoder's mode; falls back to the
 * first physical encoder if no master is set.
 *
 * Return: the active dpu_intf_mode, or INTF_MODE_NONE if the encoder is
 * NULL or has no physical encoders.
 */
2467 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2469 	struct dpu_encoder_virt *dpu_enc = NULL;
2472 	DPU_ERROR("invalid encoder\n");
2473 	return INTF_MODE_NONE;
2475 	dpu_enc = to_dpu_encoder_virt(encoder);
2477 	if (dpu_enc->cur_master)
2478 	return dpu_enc->cur_master->intf_mode;
2480 	if (dpu_enc->num_phys_encs)
2481 	return dpu_enc->phys_encs[0]->intf_mode;
2483 	return INTF_MODE_NONE;
/*
 * dpu_encoder_helper_get_dsc - fetch the DSC hardware-block mask
 * @phys_enc: physical encoder whose parent virtual encoder holds the mask
 *
 * Return: the dsc_mask stored on the parent dpu_encoder_virt (bitmask of
 * DSC blocks assigned to this encoder).
 */
2486 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
2488 	struct drm_encoder *encoder = phys_enc->parent;
2489 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2491 	return dpu_enc->dsc_mask;