}
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
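+/*
+ * Deferred work for dm_set_vblank(): updates the active vblank IRQ count
+ * under dc_lock and allows DC idle (MALL) optimizations only while no CRTC
+ * has its vblank interrupt enabled.
+ */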
+static void event_mall_stutter(struct work_struct *work)
+{
+ struct vblank_workqueue *vblank_work = container_of(work, struct vblank_workqueue, mall_work);
+ struct amdgpu_display_manager *dm = vblank_work->dm;
+
+ mutex_lock(&dm->dc_lock);
+
+ if (vblank_work->enable)
+ dm->active_vblank_irq_count++;
+ else
+ dm->active_vblank_irq_count--;
+
+ dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
+
+ DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+
+ mutex_unlock(&dm->dc_lock);
+}
+
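+/*
+ * Allocate one vblank_workqueue entry per link (dc->caps.max_links) and
+ * point each work item at event_mall_stutter(). Returns NULL on allocation
+ * failure; the caller logs an error and continues without the workqueue.
+ */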
+static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ struct vblank_workqueue *vblank_work;
+ int i = 0;
+
+ vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(vblank_work)) {
+ kfree(vblank_work);
+ return NULL;
+ }
+
+ for (i = 0; i < max_caps; i++)
+ INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
+
+ return vblank_work;
+}
+#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ spin_lock_init(&adev->dm.vblank_lock);
+#endif
if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
amdgpu_dm_init_color_mod();
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_workqueue = vblank_create_workqueue(adev, adev->dm.dc);
+
+ if (!adev->dm.vblank_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: vblank_workqueue init done %p.\n", adev->dm.vblank_workqueue);
+ }
+#endif
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
dc_commit_updates_for_stream(
dm->dc, bundle->surface_updates,
dc_state->stream_status->plane_count,
- dc_state->streams[k], &bundle->stream_update);
+ dc_state->streams[k], &bundle->stream_update, dc_state);
}
cleanup:
stream_update.stream = stream_state;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
- stream_state, &stream_update);
+ stream_state, &stream_update,
+ stream_state->ctx->dc->current_state);
mutex_unlock(&adev->dm.dc_lock);
}
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
struct amdgpu_display_manager *dm = &adev->dm;
+ unsigned long flags;
+#endif
int rc = 0;
if (enable) {
if (amdgpu_in_reset(adev))
return 0;
- mutex_lock(&dm->dc_lock);
-
- if (enable)
- dm->active_vblank_irq_count++;
- else
- dm->active_vblank_irq_count--;
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
- dc_allow_idle_optimizations(
- adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
-
- DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
+ if (dm->vblank_workqueue) {
+ spin_lock_irqsave(&dm->vblank_lock, flags);
+ dm->vblank_workqueue->dm = dm;
+ dm->vblank_workqueue->otg_inst = acrtc->otg_inst;
+ dm->vblank_workqueue->enable = enable;
+ spin_unlock_irqrestore(&dm->vblank_lock, flags);
+ schedule_work(&dm->vblank_workqueue->mall_work);
+ }
#endif
- mutex_unlock(&dm->dc_lock);
-
return 0;
}
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- int i;
+ uint32_t i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
amdgpu_dm_commit_cursors(state);
/* update planes when needed */
- for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &bundle->stream_update);
+ &bundle->stream_update,
+ dc_state);
/**
* Enable or disable the interrupts on the backend.
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_surface_update dummy_updates[MAX_SURFACES];
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&surface_updates, 0, sizeof(surface_updates));
+ memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
* To fix this, DC should permit updating only stream properties.
*/
for (j = 0; j < status->plane_count; j++)
- surface_updates[j].surface = status->plane_states[j];
+ dummy_updates[j].surface = status->plane_states[0];
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- surface_updates,
+ dummy_updates,
status->plane_count,
dm_new_crtc_state->stream,
- &stream_update);
+ &stream_update,
+ dc_state);
mutex_unlock(&dm->dc_lock);
}
struct dc_surface_update *srf_updates,
int surface_count,
struct dc_stream_state *stream,
- struct dc_stream_update *stream_update)
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
{
const struct dc_stream_status *stream_status;
enum surface_update_type update_type;
if (update_type >= UPDATE_TYPE_FULL) {
- struct dc_plane_state *new_planes[MAX_SURFACES];
-
- memset(new_planes, 0, sizeof(new_planes));
-
- for (i = 0; i < surface_count; i++)
- new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
context = dc_create_state(dc);
return;
}
- dc_resource_state_copy_construct(
- dc->current_state, context);
+ dc_resource_state_copy_construct(state, context);
- /*remove old surfaces from context */
- if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
- DC_ERROR("Failed to remove streams for new validate context!\n");
- return;
- }
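+ /*
+ * Instead of removing and re-adding every plane, build the new context
+ * from the caller-supplied state and force a full update on any pipe
+ * whose plane state differs from the current context.
+ */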
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- /* add surface to context */
- if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
- DC_ERROR("Failed to add streams for new validate context!\n");
- return;
+
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
}
-
}
.ack = NULL
};
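+/* Leaving .set and .ack NULL makes the IRQ service fall back to its
+ * generic register-based handlers for the registers named in
+ * IRQ_REG_ENTRY().
+ */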
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
.funcs = &vblank_irq_info_funcs\
}
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, matching the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
+ }
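+/* e.g. vupdate_no_lock_int_entry(0) wires DC_IRQ_SOURCE_VUPDATE1 to OTG0's
+ * VUPDATE_NO_LOCK_INT_EN and VUPDATE_NO_LOCK_EVENT_CLEAR fields.
+ */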
+
#define vblank_int_entry(reg_num)\
[DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),