drm/amd/display: remove independent lock as we have no use case today
[linux-2.6-microblaze.git] drivers/gpu/drm/amd/display/dc/core/dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "bandwidth_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (NULL != dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
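/* Create one link per physical connector reported by the VBIOS, then append
 * the requested number of virtual links, each backed by a virtual link
 * encoder.
 */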
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct core_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 link_init_params.connector_index = i;
101                 link_init_params.link_index = dc->link_count;
102                 link_init_params.dc = dc;
103                 link = link_create(&link_init_params);
104
105                 if (link) {
106                         dc->links[dc->link_count] = link;
107                         link->dc = dc;
108                         ++dc->link_count;
109                 } else {
110                         dm_error("DC: failed to create link!\n");
111                 }
112         }
113
114         for (i = 0; i < num_virtual_links; i++) {
115                 struct core_link *link = dm_alloc(sizeof(*link));
116                 struct encoder_init_data enc_init = {0};
117
118                 if (link == NULL) {
119                         BREAK_TO_DEBUGGER();
120                         goto failed_alloc;
121                 }
122
123                 link->ctx = dc->ctx;
124                 link->dc = dc;
125                 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128                 link->link_id.enum_id = ENUM_ID_1;
129                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
130
131                 enc_init.ctx = dc->ctx;
132                 enc_init.channel = CHANNEL_ID_UNKNOWN;
133                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135                 enc_init.connector = link->link_id;
136                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138                 enc_init.encoder.enum_id = ENUM_ID_1;
139                 virtual_link_encoder_construct(link->link_enc, &enc_init);
140
141                 link->public.link_index = dc->link_count;
142                 dc->links[dc->link_count] = link;
143                 dc->link_count++;
144         }
145
146         return true;
147
148 failed_alloc:
149         return false;
150 }
151
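/* Adjust the DRR (variable refresh) vmin/vmax on every pipe driving the
 * given stream, then rebuild and resend its info frame. Only stream[0] is
 * handled for now.
 */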
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153                 const struct dc_stream **stream, int num_streams,
154                 int vmin, int vmax)
155 {
156         /* TODO: Support multiple streams */
157         struct core_dc *core_dc = DC_TO_CORE(dc);
158         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
159         int i = 0;
160         bool ret = false;
161
162         for (i = 0; i < MAX_PIPES; i++) {
163                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
164
165                 if (pipe->stream == core_stream && pipe->stream_enc) {
166                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
167
168                         /* build and update the info frame */
169                         resource_build_info_frame(pipe);
170                         core_dc->hwss.update_info_frame(pipe);
171
172                         ret = true;
173                 }
174         }
175         return ret;
176 }
177
178
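/* Apply gamut remap by reprogramming the plane config on every pipe driving
 * the given stream. Only stream[0] is handled for now.
 */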
179 static bool set_gamut_remap(struct dc *dc,
180                         const struct dc_stream **stream, int num_streams)
181 {
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
184         int i = 0;
185         bool ret = false;
186         struct pipe_ctx *pipes;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
190                                 == core_stream) {
191
192                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
193                         core_dc->hwss.set_plane_config(core_dc, pipes,
194                                         &core_dc->current_context->res_ctx);
195                         ret = true;
196                 }
197         }
198
199         return ret;
200 }
201
202 /* This function is not expected to fail; proper validation will
203  * prevent it from ever being called for unsupported
204  * configurations.
205  */
206 static void stream_update_scaling(
207                 const struct dc *dc,
208                 const struct dc_stream *dc_stream,
209                 const struct rect *src,
210                 const struct rect *dst)
211 {
212         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
213         struct core_dc *core_dc = DC_TO_CORE(dc);
214         struct validate_context *cur_ctx = core_dc->current_context;
215         int i;
216
217         if (src)
218                 stream->public.src = *src;
219
220         if (dst)
221                 stream->public.dst = *dst;
222
223         for (i = 0; i < cur_ctx->stream_count; i++) {
224                 struct core_stream *cur_stream = cur_ctx->streams[i];
225
226                 if (stream == cur_stream) {
227                         struct dc_stream_status *status = &cur_ctx->stream_status[i];
228
229                         if (status->surface_count)
230                                 if (!dc_commit_surfaces_to_stream(
231                                                 &core_dc->public,
232                                                 status->surfaces,
233                                                 status->surface_count,
234                                                 &cur_stream->public))
235                                         /* Need to debug validation */
236                                         BREAK_TO_DEBUGGER();
237
238                         return;
239                 }
240         }
241 }
242
243 static bool set_psr_enable(struct dc *dc, bool enable)
244 {
245         struct core_dc *core_dc = DC_TO_CORE(dc);
246         int i;
247
248         for (i = 0; i < core_dc->link_count; i++)
249                 dc_link_set_psr_enable(&core_dc->links[i]->public,
250                                 enable);
251
252         return true;
253 }
254
255
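/* Configure PSR on the link that drives the given stream and program static
 * screen control (value 0x182) on every non-underlay pipe carrying that
 * stream.
 */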
256 static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
257 {
258         struct core_dc *core_dc = DC_TO_CORE(dc);
259         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
260         struct pipe_ctx *pipes;
261         int i;
262         unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
263
264         for (i = 0; i < core_dc->link_count; i++) {
265                 if (core_stream->sink->link == core_dc->links[i])
266                         dc_link_setup_psr(&core_dc->links[i]->public,
267                                         stream);
268         }
269
270         for (i = 0; i < MAX_PIPES; i++) {
271                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
272                                 == core_stream && i != underlay_idx) {
273                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
274                         core_dc->hwss.set_static_screen_control(&pipes, 1,
275                                         0x182);
276                 }
277         }
278
279         return true;
280 }
281
282 static void set_drive_settings(struct dc *dc,
283                 struct link_training_settings *lt_settings,
284                 const struct dc_link *link)
285 {
286         struct core_dc *core_dc = DC_TO_CORE(dc);
287         int i;
288
289         for (i = 0; i < core_dc->link_count; i++) {
290                 if (&core_dc->links[i]->public == link)
291                         break;
292         }
293
294         if (i >= core_dc->link_count) {
295                 ASSERT_CRITICAL(false);
                return;
        }
296
297         dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
298 }
299
300 static void perform_link_training(struct dc *dc,
301                 struct dc_link_settings *link_setting,
302                 bool skip_video_pattern)
303 {
304         struct core_dc *core_dc = DC_TO_CORE(dc);
305         int i;
306
307         for (i = 0; i < core_dc->link_count; i++)
308                 dc_link_dp_perform_link_training(
309                         &core_dc->links[i]->public,
310                         link_setting,
311                         skip_video_pattern);
312 }
313
314 static void set_preferred_link_settings(struct dc *dc,
315                 struct dc_link_settings *link_setting,
316                 const struct dc_link *link)
317 {
318         struct core_link *core_link = DC_LINK_TO_CORE(link);
319
320         core_link->public.verified_link_cap.lane_count =
321                                 link_setting->lane_count;
322         core_link->public.verified_link_cap.link_rate =
323                                 link_setting->link_rate;
324         dp_retrain_link_dp_test(core_link, link_setting, false);
325 }
326
327 static void enable_hpd(const struct dc_link *link)
328 {
329         dc_link_dp_enable_hpd(link);
330 }
331
332 static void disable_hpd(const struct dc_link *link)
333 {
334         dc_link_dp_disable_hpd(link);
335 }
336
337
338 static void set_test_pattern(
339                 const struct dc_link *link,
340                 enum dp_test_pattern test_pattern,
341                 const struct link_training_settings *p_link_settings,
342                 const unsigned char *p_custom_pattern,
343                 unsigned int cust_pattern_size)
344 {
345         if (link != NULL)
346                 dc_link_dp_set_test_pattern(
347                         link,
348                         test_pattern,
349                         p_link_settings,
350                         p_custom_pattern,
351                         cust_pattern_size);
352 }
353
354 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
355 {
356         core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
357         if (core_dc->hwss.set_drr != NULL) {
358                 core_dc->public.stream_funcs.adjust_vmin_vmax =
359                                 stream_adjust_vmin_vmax;
360         }
361
362         core_dc->public.stream_funcs.set_gamut_remap =
363                         set_gamut_remap;
364
365         core_dc->public.stream_funcs.set_psr_enable =
366                         set_psr_enable;
367
368         core_dc->public.stream_funcs.setup_psr =
369                         setup_psr;
370
371         core_dc->public.link_funcs.set_drive_settings =
372                         set_drive_settings;
373
374         core_dc->public.link_funcs.perform_link_training =
375                         perform_link_training;
376
377         core_dc->public.link_funcs.set_preferred_link_settings =
378                         set_preferred_link_settings;
379
380         core_dc->public.link_funcs.enable_hpd =
381                         enable_hpd;
382
383         core_dc->public.link_funcs.disable_hpd =
384                         disable_hpd;
385
386         core_dc->public.link_funcs.set_test_pattern =
387                         set_test_pattern;
388 }
389
390 static void destruct(struct core_dc *dc)
391 {
392         resource_validate_ctx_destruct(dc->current_context);
393
394         destroy_links(dc);
395
396         dc_destroy_resource_pool(dc);
397
398         if (dc->ctx->gpio_service)
399                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
400
401         if (dc->ctx->i2caux)
402                 dal_i2caux_destroy(&dc->ctx->i2caux);
403
404         if (dc->ctx->created_bios)
405                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
406
407         if (dc->ctx->logger)
408                 dal_logger_destroy(&dc->ctx->logger);
409
410         dm_free(dc->current_context);
411         dc->current_context = NULL;
412         dm_free(dc->temp_flip_context);
413         dc->temp_flip_context = NULL;
414         dm_free(dc->scratch_val_ctx);
415         dc->scratch_val_ctx = NULL;
416
417         dm_free(dc->ctx);
418         dc->ctx = NULL;
419 }
420
421 static bool construct(struct core_dc *dc,
422                 const struct dc_init_data *init_params)
423 {
424         struct dal_logger *logger;
425         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
426         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
427
428         if (!dc_ctx) {
429                 dm_error("%s: failed to create ctx\n", __func__);
430                 goto ctx_fail;
431         }
432
433         dc->current_context = dm_alloc(sizeof(*dc->current_context));
434         dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
435         dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));
436
437         if (!dc->current_context || !dc->temp_flip_context ||
                        !dc->scratch_val_ctx) {
438                 dm_error("%s: failed to create validate ctx\n", __func__);
439                 goto val_ctx_fail;
440         }
441
442         dc_ctx->cgs_device = init_params->cgs_device;
443         dc_ctx->driver_context = init_params->driver;
444         dc_ctx->dc = &dc->public;
445         dc_ctx->asic_id = init_params->asic_id;
446
447         /* Create logger */
448         logger = dal_logger_create(dc_ctx);
449
450         if (!logger) {
451         /* cannot use the logger yet; call the base driver's 'print error' instead */
452                 dm_error("%s: failed to create Logger!\n", __func__);
453                 goto logger_fail;
454         }
455         dc_ctx->logger = logger;
456         dc->ctx = dc_ctx;
457         dc->ctx->dce_environment = init_params->dce_environment;
458
459         dc_version = resource_parse_asic_id(init_params->asic_id);
460         dc->ctx->dce_version = dc_version;
461
462         /* Resource should construct all asic specific resources.
463          * This should be the only place where we need to parse the asic id
464          */
465         if (init_params->vbios_override)
466                 dc_ctx->dc_bios = init_params->vbios_override;
467         else {
468                 /* Create BIOS parser */
469                 struct bp_init_data bp_init_data;
470                 bp_init_data.ctx = dc_ctx;
471                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
472
473                 dc_ctx->dc_bios = dal_bios_parser_create(
474                                 &bp_init_data, dc_version);
475
476                 if (!dc_ctx->dc_bios) {
477                         ASSERT_CRITICAL(false);
478                         goto bios_fail;
479                 }
480
481                 dc_ctx->created_bios = true;
482         }
483
484         /* Create I2C AUX */
485         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
486
487         if (!dc_ctx->i2caux) {
488                 ASSERT_CRITICAL(false);
489                 goto failed_to_create_i2caux;
490         }
491
492         /* Create GPIO service */
493         dc_ctx->gpio_service = dal_gpio_service_create(
494                         dc_version,
495                         dc_ctx->dce_environment,
496                         dc_ctx);
497
498         if (!dc_ctx->gpio_service) {
499                 ASSERT_CRITICAL(false);
500                 goto gpio_fail;
501         }
502
503         dc->res_pool = dc_create_resource_pool(
504                         dc,
505                         init_params->num_virtual_links,
506                         dc_version,
507                         init_params->asic_id);
508         if (!dc->res_pool)
509                 goto create_resource_fail;
510
511         if (!create_links(dc, init_params->num_virtual_links))
512                 goto create_links_fail;
513
514         allocate_dc_stream_funcs(dc);
515
516         return true;
517
518         /**** error handling here ****/
519 create_links_fail:
520 create_resource_fail:
521 gpio_fail:
522 failed_to_create_i2caux:
523 bios_fail:
524 logger_fail:
525 val_ctx_fail:
526 ctx_fail:
527         destruct(dc);
528         return false;
529 }
530
531 /*
532 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
533 {
534         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
535         unsigned int pixDurationInPico = round(pixel_duration);
536
537         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
538
539         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
540         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
541         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
542
543         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
544         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
545         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
546
547         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
548         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
549
550         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
551         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
552 }
553 */
554
555 /*******************************************************************************
556  * Public functions
557  ******************************************************************************/
558
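/* Allocate and construct the display core, bring up the hardware, and derive
 * the stream/link/audio capability limits from the resource pool.
 */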
559 struct dc *dc_create(const struct dc_init_data *init_params)
560 {
561         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
562         unsigned int full_pipe_count;
563
564         if (NULL == core_dc)
565                 goto alloc_fail;
566
567         if (false == construct(core_dc, init_params))
568                 goto construct_fail;
569
570         /*TODO: separate HW and SW initialization*/
571         core_dc->hwss.init_hw(core_dc);
572
573         full_pipe_count = core_dc->res_pool->pipe_count;
574         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
575                 full_pipe_count--;
576         core_dc->public.caps.max_streams = min(
577                         full_pipe_count,
578                         core_dc->res_pool->stream_enc_count);
579
580         core_dc->public.caps.max_links = core_dc->link_count;
581         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
582
583         core_dc->public.config = init_params->flags;
584
585         dm_logger_write(core_dc->ctx->logger, LOG_DC,
586                         "Display Core initialized\n");
587
588
589         /* TODO: missing feature to be enabled */
590         core_dc->public.debug.disable_dfs_bypass = true;
591
592         return &core_dc->public;
593
594 construct_fail:
595         dm_free(core_dc);
596
597 alloc_fail:
598         return NULL;
599 }
600
601 void dc_destroy(struct dc **dc)
602 {
603         struct core_dc *core_dc = DC_TO_CORE(*dc);
604         destruct(core_dc);
605         dm_free(core_dc);
606         *dc = NULL;
607 }
608
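/* Full validation is required when the requested set differs from the current
 * context: a different stream or surface count, a changed stream, or any
 * surface change other than clip rect and destination position.
 */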
609 static bool is_validation_required(
610                 const struct core_dc *dc,
611                 const struct dc_validation_set set[],
612                 int set_count)
613 {
614         const struct validate_context *context = dc->current_context;
615         int i, j;
616
617         if (context->stream_count != set_count)
618                 return true;
619
620         for (i = 0; i < set_count; i++) {
621
622                 if (set[i].surface_count != context->stream_status[i].surface_count)
623                         return true;
624                 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
625                         return true;
626
627                 for (j = 0; j < set[i].surface_count; j++) {
628                         struct dc_surface temp_surf = { 0 };
629
630                         temp_surf = *context->stream_status[i].surfaces[j];
631                         temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
632                         temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
633                         temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
634
635                         if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
636                                 return true;
637                 }
638         }
639
640         return false;
641 }
642
643 bool dc_validate_resources(
644                 const struct dc *dc,
645                 const struct dc_validation_set set[],
646                 uint8_t set_count)
647 {
648         struct core_dc *core_dc = DC_TO_CORE(dc);
649         enum dc_status result = DC_ERROR_UNEXPECTED;
650         struct validate_context *context;
651
652         if (!is_validation_required(core_dc, set, set_count))
653                 return true;
654
655         context = dm_alloc(sizeof(struct validate_context));
656         if (context == NULL)
657                 goto context_alloc_fail;
658
659         result = core_dc->res_pool->funcs->validate_with_context(
660                                                 core_dc, set, set_count, context);
661
662         resource_validate_ctx_destruct(context);
663         dm_free(context);
664
665 context_alloc_fail:
666         if (result != DC_OK) {
667                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
668                                 "%s:resource validation failed, dc_status:%d\n",
669                                 __func__,
670                                 result);
671         }
672
673         return (result == DC_OK);
674
675 }
676
677 bool dc_validate_guaranteed(
678                 const struct dc *dc,
679                 const struct dc_stream *stream)
680 {
681         struct core_dc *core_dc = DC_TO_CORE(dc);
682         enum dc_status result = DC_ERROR_UNEXPECTED;
683         struct validate_context *context;
684
685         context = dm_alloc(sizeof(struct validate_context));
686         if (context == NULL)
687                 goto context_alloc_fail;
688
689         result = core_dc->res_pool->funcs->validate_guaranteed(
690                                         core_dc, stream, context);
691
692         resource_validate_ctx_destruct(context);
693         dm_free(context);
694
695 context_alloc_fail:
696         if (result != DC_OK) {
697                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
698                         "%s:guaranteed validation failed, dc_status:%d\n",
699                         __func__,
700                         result);
701         }
702
703         return (result == DC_OK);
704 }
705
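/* Group top-level pipes whose streams have synchronizable timings and enable
 * timing synchronization for each group, using the first unblanked pipe as
 * the group master.
 */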
706 static void program_timing_sync(
707                 struct core_dc *core_dc,
708                 struct validate_context *ctx)
709 {
710         int i, j;
711         int group_index = 0;
712         int pipe_count = ctx->res_ctx.pool->pipe_count;
713         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
714
715         for (i = 0; i < pipe_count; i++) {
716                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
717                         continue;
718
719                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
720         }
721
722         for (i = 0; i < pipe_count; i++) {
723                 int group_size = 1;
724                 struct pipe_ctx *pipe_set[MAX_PIPES];
725
726                 if (!unsynced_pipes[i])
727                         continue;
728
729                 pipe_set[0] = unsynced_pipes[i];
730                 unsynced_pipes[i] = NULL;
731
732                 /* Add tg to the set, search the rest of the tgs for ones
733                  * with the same timing, and add all matching tgs to the group
734                  */
735                 for (j = i + 1; j < pipe_count; j++) {
736                         if (!unsynced_pipes[j])
737                                 continue;
738
739                         if (resource_are_streams_timing_synchronizable(
740                                         unsynced_pipes[j]->stream,
741                                         pipe_set[0]->stream)) {
742                                 pipe_set[group_size] = unsynced_pipes[j];
743                                 unsynced_pipes[j] = NULL;
744                                 group_size++;
745                         }
746                 }
747
748                 /* set first unblanked pipe as master */
749                 for (j = 0; j < group_size; j++) {
750                         struct pipe_ctx *temp;
751
752                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
753                                 if (j == 0)
754                                         break;
755
756                                 temp = pipe_set[0];
757                                 pipe_set[0] = pipe_set[j];
758                                 pipe_set[j] = temp;
759                                 break;
760                         }
761                 }
762
763                 /* remove any other unblanked pipes as they have already been synced */
764                 for (j = j + 1; j < group_size; j++) {
765                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
766                                 group_size--;
767                                 pipe_set[j] = pipe_set[group_size];
768                                 j--;
769                         }
770                 }
771
772                 if (group_size > 1) {
773                         core_dc->hwss.enable_timing_synchronization(
774                                 core_dc, group_index, group_size, pipe_set);
775                         group_index++;
776                 }
777         }
778 }
779
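/* Return true if the requested stream array differs from the streams in the
 * current context.
 */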
780 static bool streams_changed(
781                 struct core_dc *dc,
782                 const struct dc_stream *streams[],
783                 uint8_t stream_count)
784 {
785         uint8_t i;
786
787         if (stream_count != dc->current_context->stream_count)
788                 return true;
789
790         for (i = 0; i < dc->current_context->stream_count; i++) {
791                 if (&dc->current_context->streams[i]->public != streams[i])
792                         return true;
793         }
794
795         return false;
796 }
797
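/* Commit a new set of streams: validate them together with their currently
 * attached surfaces, apply the resulting context to hardware, synchronize
 * timing across matching pipes, unblank pipes with visible surfaces, and
 * swap in the new context.
 */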
798 bool dc_commit_streams(
799         struct dc *dc,
800         const struct dc_stream *streams[],
801         uint8_t stream_count)
802 {
803         struct core_dc *core_dc = DC_TO_CORE(dc);
804         struct dc_bios *dcb = core_dc->ctx->dc_bios;
805         enum dc_status result = DC_ERROR_UNEXPECTED;
806         struct validate_context *context;
807         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
808         int i, j, k;
809
810         if (false == streams_changed(core_dc, streams, stream_count))
811                 return true;
812
813         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
814                                 __func__, stream_count);
815
816         for (i = 0; i < stream_count; i++) {
817                 const struct dc_stream *stream = streams[i];
818                 const struct dc_stream_status *status = dc_stream_get_status(stream);
819                 int j;
820
821                 dc_stream_log(stream,
822                                 core_dc->ctx->logger,
823                                 LOG_DC);
824
825                 set[i].stream = stream;
826
827                 if (status) {
828                         set[i].surface_count = status->surface_count;
829                         for (j = 0; j < status->surface_count; j++)
830                                 set[i].surfaces[j] = status->surfaces[j];
831                 }
832
833         }
834
835         context = dm_alloc(sizeof(struct validate_context));
836         if (context == NULL)
837                 goto context_alloc_fail;
838
839         result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
840         if (result != DC_OK) {
841                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
842                                         "%s: Context validation failed! dc_status:%d\n",
843                                         __func__,
844                                         result);
845                 BREAK_TO_DEBUGGER();
846                 resource_validate_ctx_destruct(context);
847                 goto fail;
848         }
849
850         if (!dcb->funcs->is_accelerated_mode(dcb)) {
851                 core_dc->hwss.enable_accelerated_mode(core_dc);
852         }
853
854         if (result == DC_OK) {
855                 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
856         }
857
858         program_timing_sync(core_dc, context);
859
860         for (i = 0; i < context->stream_count; i++) {
861                 const struct core_sink *sink = context->streams[i]->sink;
862
863                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
864                         const struct dc_surface *dc_surface =
865                                         context->stream_status[i].surfaces[j];
866
867                         for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
868                                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
869
870                                 if (dc_surface != &pipe->surface->public
871                                                 || !dc_surface->visible)
872                                         continue;
873
874                                 pipe->tg->funcs->set_blank(pipe->tg, false);
875                         }
876                 }
877
878                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dkHz}",
879                                 context->streams[i]->public.timing.h_addressable,
880                                 context->streams[i]->public.timing.v_addressable,
881                                 context->streams[i]->public.timing.h_total,
882                                 context->streams[i]->public.timing.v_total,
883                                 context->streams[i]->public.timing.pix_clk_khz);
884         }
885
886         resource_validate_ctx_destruct(core_dc->current_context);
887
888         if (core_dc->temp_flip_context != core_dc->current_context) {
889                 dm_free(core_dc->temp_flip_context);
890                 core_dc->temp_flip_context = core_dc->current_context;
891         }
892         core_dc->current_context = context;
893         memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));
894
895         return (result == DC_OK);
896
897 fail:
898         dm_free(context);
899
900 context_alloc_fail:
901         return (result == DC_OK);
902 }
903
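/* Prepare for a surface update: verify the target stream is committed, skip
 * the work when only clip rect or destination position changed, otherwise
 * build a temporary context with the new surfaces, validate and program
 * bandwidth, and prepare the affected pipes.
 */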
904 bool dc_pre_update_surfaces_to_stream(
905                 struct dc *dc,
906                 const struct dc_surface *const *new_surfaces,
907                 uint8_t new_surface_count,
908                 const struct dc_stream *dc_stream)
909 {
910         int i, j;
911         struct core_dc *core_dc = DC_TO_CORE(dc);
912         struct dc_stream_status *stream_status = NULL;
913         struct validate_context *context;
914         bool ret = true;
915
916         pre_surface_trace(dc, new_surfaces, new_surface_count);
917
918         if (core_dc->current_context->stream_count == 0)
919                 return false;
920
921         /* Cannot commit surfaces to a stream that is not committed */
922         for (i = 0; i < core_dc->current_context->stream_count; i++)
923                 if (dc_stream == &core_dc->current_context->streams[i]->public)
924                         break;
925
926         if (i == core_dc->current_context->stream_count)
927                 return false;
928
929         stream_status = &core_dc->current_context->stream_status[i];
930
931         if (new_surface_count == stream_status->surface_count) {
932                 bool skip_pre = true;
933
934                 for (i = 0; i < stream_status->surface_count; i++) {
935                         struct dc_surface temp_surf = { 0 };
936
937                         temp_surf = *stream_status->surfaces[i];
938                         temp_surf.clip_rect = new_surfaces[i]->clip_rect;
939                         temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
940                         temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
941
942                         if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
943                                 skip_pre = false;
944                                 break;
945                         }
946                 }
947
948                 if (skip_pre)
949                         return true;
950         }
951
952         context = dm_alloc(sizeof(struct validate_context));
953
954         if (!context) {
955                 dm_error("%s: failed to create validate ctx\n", __func__);
956                 ret = false;
957                 goto val_ctx_fail;
958         }
959
960         resource_validate_ctx_copy_construct(core_dc->current_context, context);
961
962         dm_logger_write(core_dc->ctx->logger, LOG_DC,
963                                 "%s: commit %d surfaces to stream 0x%x\n",
964                                 __func__,
965                                 new_surface_count,
966                                 dc_stream);
967
968         if (!resource_attach_surfaces_to_context(
969                         new_surfaces, new_surface_count, dc_stream, context)) {
970                 BREAK_TO_DEBUGGER();
971                 ret = false;
972                 goto unexpected_fail;
973         }
974
975         for (i = 0; i < new_surface_count; i++)
976                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
977                         if (context->res_ctx.pipe_ctx[j].surface !=
978                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
979                                 continue;
980
981                         resource_build_scaling_params(
982                                 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
983                 }
984
985         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
986                 BREAK_TO_DEBUGGER();
987                 ret = false;
988                 goto unexpected_fail;
989         }
990
991         core_dc->hwss.set_bandwidth(core_dc, context, false);
992
993         for (i = 0; i < new_surface_count; i++)
994                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
995                         if (context->res_ctx.pipe_ctx[j].surface !=
996                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
997                                 continue;
998
999                         core_dc->hwss.prepare_pipe_for_context(
1000                                         core_dc,
1001                                         &context->res_ctx.pipe_ctx[j],
1002                                         context);
1003                 }
1004
1005 unexpected_fail:
1006         resource_validate_ctx_destruct(context);
1007         dm_free(context);
1008 val_ctx_fail:
1009
1010         return ret;
1011 }
1012
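/* Finish a surface update: power down the front end of any pipe left without
 * a stream, revalidate bandwidth for the trimmed context, program the
 * (possibly reduced) bandwidth, and adopt the new context.
 */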
1013 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1014 {
1015         int i;
1016         struct core_dc *core_dc = DC_TO_CORE(dc);
1017         struct validate_context *context = dm_alloc(sizeof(struct validate_context));
1018
1019         if (!context) {
1020                 dm_error("%s: failed to create validate ctx\n", __func__);
1021                 return false;
1022         }
1023         resource_validate_ctx_copy_construct(core_dc->current_context, context);
1024
1025         post_surface_trace(dc);
1026
1027         for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
1028                 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1029                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
1030                         core_dc->hwss.power_down_front_end(
1031                                         core_dc, &context->res_ctx.pipe_ctx[i]);
1032                 }
1033         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1034                 BREAK_TO_DEBUGGER();
                resource_validate_ctx_destruct(context);
                dm_free(context);
1035                 return false;
1036         }
1037
1038         core_dc->hwss.set_bandwidth(core_dc, context, true);
1039
1040         resource_validate_ctx_destruct(core_dc->current_context);
1041         core_dc->current_context = context;
1042
1043         return true;
1044 }
1045
1046 bool dc_commit_surfaces_to_stream(
1047                 struct dc *dc,
1048                 const struct dc_surface **new_surfaces,
1049                 uint8_t new_surface_count,
1050                 const struct dc_stream *dc_stream)
1051 {
1052         struct dc_surface_update updates[MAX_SURFACES];
1053         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1054         struct dc_plane_info plane_info[MAX_SURFACES];
1055         struct dc_scaling_info scaling_info[MAX_SURFACES];
1056         int i;
1057
1058         if (!dc_pre_update_surfaces_to_stream(
1059                         dc, new_surfaces, new_surface_count, dc_stream))
1060                 return false;
1061
1062         memset(updates, 0, sizeof(updates));
1063         memset(flip_addr, 0, sizeof(flip_addr));
1064         memset(plane_info, 0, sizeof(plane_info));
1065         memset(scaling_info, 0, sizeof(scaling_info));
1066
1067         for (i = 0; i < new_surface_count; i++) {
1068                 updates[i].surface = new_surfaces[i];
1069                 updates[i].gamma =
1070                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1071                 flip_addr[i].address = new_surfaces[i]->address;
1072                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1073                 plane_info[i].color_space = new_surfaces[i]->color_space;
1074                 plane_info[i].format = new_surfaces[i]->format;
1075                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1076                 plane_info[i].rotation = new_surfaces[i]->rotation;
1077                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1078                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1079                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1080                 plane_info[i].visible = new_surfaces[i]->visible;
1081                 plane_info[i].dcc = new_surfaces[i]->dcc;
1082                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1083                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1084                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1085                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1086
1087                 updates[i].flip_addr = &flip_addr[i];
1088                 updates[i].plane_info = &plane_info[i];
1089                 updates[i].scaling_info = &scaling_info[i];
1090         }
1091         dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1092
1093         return dc_post_update_surfaces_to_stream(dc);
1094 }
1095
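/* Check whether the given surface is attached to any pipe in the context. */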
1096 static bool is_surface_in_context(
1097                 const struct validate_context *context,
1098                 const struct dc_surface *surface)
1099 {
1100         int j;
1101
1102         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1103                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1104
1105                 if (surface == &pipe_ctx->surface->public) {
1106                         return true;
1107                 }
1108         }
1109
1110         return false;
1111 }
1112
1113 enum surface_update_type {
1114         UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
1115         UPDATE_TYPE_MED,  /* a lot of programming needed.  may need to alloc */
1116         UPDATE_TYPE_FULL, /* may need to shuffle resources */
1117 };
1118
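/* Classify a single surface update: scaling or plane_info changes, or a
 * surface not yet in the current context, require a FULL update; transfer
 * function or HDR metadata changes need a MED update; anything else (e.g. a
 * flip address) is FAST.
 */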
1119 static enum surface_update_type det_surface_update(
1120                 const struct core_dc *dc,
1121                 const struct dc_surface_update *u)
1122 {
1123         const struct validate_context *context = dc->current_context;
1124
1125         if (u->scaling_info || u->plane_info)
1126                 /* todo: not all scaling and plane_info updates need a full
1127                  * update, i.e. check whether the following are the same:
1128                  * scale ratio, viewport, surface bpp, etc.
1129                  */
1130                 return UPDATE_TYPE_FULL; /* may need bandwidth update */
1131
1132         if (!is_surface_in_context(context, u->surface))
1133                 return UPDATE_TYPE_FULL;
1134
1135         if (u->in_transfer_func ||
1136                 u->out_transfer_func ||
1137                 u->hdr_static_metadata)
1138                 return UPDATE_TYPE_MED;
1139
1140         return UPDATE_TYPE_FAST;
1141 }
1142
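/* Return the most severe update type required across the whole update set;
 * a changed surface count always forces a FULL update.
 */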
1143 static enum surface_update_type check_update_surfaces_for_stream(
1144                 struct core_dc *dc,
1145                 struct dc_surface_update *updates,
1146                 int surface_count,
1147                 const struct dc_stream_status *stream_status)
1148 {
1149         int i;
1150         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1151
1152         if (stream_status->surface_count != surface_count)
1153                 return UPDATE_TYPE_FULL;
1154
1155         for (i = 0 ; i < surface_count; i++) {
1156                 enum surface_update_type type =
1157                                 det_surface_update(dc, &updates[i]);
1158
1159                 if (type == UPDATE_TYPE_FULL)
1160                         return type;
1161
1162                 if (overall_type < type)
1163                         overall_type = type;
1164         }
1165
1166         return overall_type;
1167 }
1168
1169 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1170
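/* Apply a set of surface updates to a stream. FULL updates rebuild a context
 * in the scratch flip context and reattach the surfaces; lighter updates
 * reuse the current context. Programming is done per pipe, taking the pipe
 * control lock around address and transfer-function updates on unblanked
 * pipes and reprogramming info frames as needed.
 */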
1171 void dc_update_surfaces_for_stream(struct dc *dc,
1172                 struct dc_surface_update *updates, int surface_count,
1173                 const struct dc_stream *dc_stream)
1174 {
1175         struct core_dc *core_dc = DC_TO_CORE(dc);
1176         struct validate_context *context;
1177         int i, j;
1178
1179         enum surface_update_type update_type;
1180         const struct dc_stream_status *stream_status;
1181
1182         stream_status = dc_stream_get_status(dc_stream);
1183         ASSERT(stream_status);
1184         if (!stream_status)
1185                 return; /* Cannot commit surface to stream that is not committed */
1186
1187         update_type = check_update_surfaces_for_stream(
1188                         core_dc, updates, surface_count, stream_status);
1189
1190         if (update_type >= update_surface_trace_level)
1191                 update_surface_trace(dc, updates, surface_count);
1192
1193         if (update_type >= UPDATE_TYPE_FULL) {
1194                 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1195
1196                 for (i = 0; i < surface_count; i++)
1197                         new_surfaces[i] = updates[i].surface;
1198
1199                 /* initialize scratch memory for building context */
1200                 context = core_dc->temp_flip_context;
1201                 resource_validate_ctx_copy_construct(
1202                                 core_dc->current_context, context);
1203
1204                 /* add surface to context */
1205                 if (!resource_attach_surfaces_to_context(
1206                                 new_surfaces, surface_count, dc_stream, context)) {
1207                         BREAK_TO_DEBUGGER();
1208                         return;
1209                 }
1210         } else {
1211                 context = core_dc->current_context;
1212         }
1213         for (i = 0; i < surface_count; i++) {
1214                 /* save update param into surface */
1215                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1216                 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1217
1218                 if (updates[i].flip_addr) {
1219                         surface->public.address = updates[i].flip_addr->address;
1220                         surface->public.flip_immediate =
1221                                         updates[i].flip_addr->flip_immediate;
1222                 }
1223
1224                 if (updates[i].scaling_info) {
1225                         surface->public.scaling_quality =
1226                                         updates[i].scaling_info->scaling_quality;
1227                         surface->public.dst_rect =
1228                                         updates[i].scaling_info->dst_rect;
1229                         surface->public.src_rect =
1230                                         updates[i].scaling_info->src_rect;
1231                         surface->public.clip_rect =
1232                                         updates[i].scaling_info->clip_rect;
1233                 }
1234
1235                 if (updates[i].plane_info) {
1236                         surface->public.color_space =
1237                                         updates[i].plane_info->color_space;
1238                         surface->public.format =
1239                                         updates[i].plane_info->format;
1240                         surface->public.plane_size =
1241                                         updates[i].plane_info->plane_size;
1242                         surface->public.rotation =
1243                                         updates[i].plane_info->rotation;
1244                         surface->public.horizontal_mirror =
1245                                         updates[i].plane_info->horizontal_mirror;
1246                         surface->public.stereo_format =
1247                                         updates[i].plane_info->stereo_format;
1248                         surface->public.tiling_info =
1249                                         updates[i].plane_info->tiling_info;
1250                         surface->public.visible =
1251                                         updates[i].plane_info->visible;
1252                         surface->public.dcc =
1253                                         updates[i].plane_info->dcc;
1254                 }
1255
1256                 /* not sure if we still need this */
1257                 if (update_type == UPDATE_TYPE_FULL) {
1258                         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1259                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1260
1261                                 if (pipe_ctx->surface != surface)
1262                                         continue;
1263
1264                                 resource_build_scaling_params(updates[i].surface, pipe_ctx);
1265                         }
1266                 }
1267
1268                 if (updates[i].gamma &&
1269                         updates[i].gamma != surface->public.gamma_correction) {
1270                         if (surface->public.gamma_correction != NULL)
1271                                 dc_gamma_release(&surface->public.
1272                                                 gamma_correction);
1273
1274                         dc_gamma_retain(updates[i].gamma);
1275                         surface->public.gamma_correction =
1276                                                 updates[i].gamma;
1277                 }
1278
1279                 if (updates[i].in_transfer_func &&
1280                         updates[i].in_transfer_func != surface->public.in_transfer_func) {
1281                         if (surface->public.in_transfer_func != NULL)
1282                                 dc_transfer_func_release(
1283                                                 surface->public.
1284                                                 in_transfer_func);
1285
1286                         dc_transfer_func_retain(
1287                                         updates[i].in_transfer_func);
1288                         surface->public.in_transfer_func =
1289                                         updates[i].in_transfer_func;
1290                 }
1291
1292                 if (updates[i].out_transfer_func &&
1293                         updates[i].out_transfer_func != dc_stream->out_transfer_func) {
1294                         if (dc_stream->out_transfer_func != NULL)
1295                                 dc_transfer_func_release(dc_stream->out_transfer_func);
1296                         dc_transfer_func_retain(updates[i].out_transfer_func);
1297                         stream->public.out_transfer_func = updates[i].out_transfer_func;
1298                 }
1299                 if (updates[i].hdr_static_metadata)
1300                         surface->public.hdr_static_ctx =
1301                                 *(updates[i].hdr_static_metadata);
1302         }
1303
1304         if (update_type == UPDATE_TYPE_FULL &&
1305                         !core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1306                 BREAK_TO_DEBUGGER();
1307                 return;
1308         }
1309
1310         if (!surface_count)  /* reset */
1311                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1312
1313         for (i = 0; i < surface_count; i++) {
1314                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1315
1316                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1317                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1318                         struct pipe_ctx *cur_pipe_ctx;
1319                         bool is_new_pipe_surface = true;
1320
1321                         if (pipe_ctx->surface != surface)
1322                                 continue;
1323
1324                         if (update_type == UPDATE_TYPE_FULL) {
1325                                 /* only apply for top pipe */
1326                                 if (!pipe_ctx->top_pipe) {
1327                                         core_dc->hwss.apply_ctx_for_surface(core_dc,
1328                                                          surface, context);
1329                                         context_timing_trace(dc, &context->res_ctx);
1330                                 }
1331                         }
1332
1333                         if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1334                                 core_dc->hwss.pipe_control_lock(
1335                                                 core_dc,
1336                                                 pipe_ctx,
1337                                                 true);
1338                         }
1339
1340                         if (updates[i].flip_addr)
1341                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1342
1343                         if (update_type == UPDATE_TYPE_FAST)
1344                                 continue;
1345
1346                         cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1347                         if (cur_pipe_ctx->surface == pipe_ctx->surface)
1348                                 is_new_pipe_surface = false;
1349
1350                         if (is_new_pipe_surface ||
1351                                         updates[i].in_transfer_func)
1352                                 core_dc->hwss.set_input_transfer_func(
1353                                                 pipe_ctx, pipe_ctx->surface);
1354
1355                         if (is_new_pipe_surface ||
1356                                         updates[i].out_transfer_func)
1357                                 core_dc->hwss.set_output_transfer_func(
1358                                                 pipe_ctx,
1359                                                 pipe_ctx->surface,
1360                                                 pipe_ctx->stream);
1361
1362                         if (updates[i].hdr_static_metadata) {
1363                                 resource_build_info_frame(pipe_ctx);
1364                                 core_dc->hwss.update_info_frame(pipe_ctx);
1365                         }
1366                 }
1367         }
1368
1369         for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1370                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1371
1372                 for (j = 0; j < surface_count; j++) {
1373                         if (updates[j].surface == &pipe_ctx->surface->public) {
1374                                 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1375                                         core_dc->hwss.pipe_control_lock(
1376                                                         core_dc,
1377                                                         pipe_ctx,
1378                                                         false);
1379                                 }
1380                                 break;
1381                         }
1382                 }
1383         }
1384
1385         if (core_dc->current_context != context) {
1386                 resource_validate_ctx_destruct(core_dc->current_context);
1387                 core_dc->temp_flip_context = core_dc->current_context;
1388
1389                 core_dc->current_context = context;
1390         }
1391 }
1392
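/* Return the number of streams in the currently committed context. */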
1393 uint8_t dc_get_current_stream_count(const struct dc *dc)
1394 {
1395         struct core_dc *core_dc = DC_TO_CORE(dc);
1396         return core_dc->current_context->stream_count;
1397 }
1398
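/* Return the stream at index i in the current context, or NULL when the
 * index is out of range.
 */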
1399 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1400 {
1401         struct core_dc *core_dc = DC_TO_CORE(dc);
1402         if (i < core_dc->current_context->stream_count)
1403                 return &(core_dc->current_context->streams[i]->public);
1404         return NULL;
1405 }
1406
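/* Return the public link at link_index. The index is not range checked, so
 * callers must stay below the number of created links.
 */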
1407 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1408 {
1409         struct core_dc *core_dc = DC_TO_CORE(dc);
1410         return &core_dc->links[link_index]->public;
1411 }
1412
1413 const struct graphics_object_id dc_get_link_id_at_index(
1414         struct dc *dc, uint32_t link_index)
1415 {
1416         struct core_dc *core_dc = DC_TO_CORE(dc);
1417         return core_dc->links[link_index]->link_id;
1418 }
1419
1420 const struct ddc_service *dc_get_ddc_at_index(
1421         struct dc *dc, uint32_t link_index)
1422 {
1423         struct core_dc *core_dc = DC_TO_CORE(dc);
1424         return core_dc->links[link_index]->ddc;
1425 }
1426
1427 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1428         struct dc *dc, uint32_t link_index)
1429 {
1430         struct core_dc *core_dc = DC_TO_CORE(dc);
1431         return core_dc->links[link_index]->public.irq_source_hpd;
1432 }
1433
1434 const struct audio **dc_get_audios(struct dc *dc)
1435 {
1436         struct core_dc *core_dc = DC_TO_CORE(dc);
1437         return (const struct audio **)core_dc->res_pool->audios;
1438 }
1439
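/* Apply new flip addresses: update each surface's address and flip_immediate
 * flag, then reprogram the plane address on every pipe currently driving
 * that surface.
 */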
1440 void dc_flip_surface_addrs(
1441                 struct dc *dc,
1442                 const struct dc_surface *const surfaces[],
1443                 struct dc_flip_addrs flip_addrs[],
1444                 uint32_t count)
1445 {
1446         struct core_dc *core_dc = DC_TO_CORE(dc);
1447         int i, j;
1448
1449         for (i = 0; i < count; i++) {
1450                 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1451
1452                 surface->public.address = flip_addrs[i].address;
1453                 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1454
1455                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1456                         struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1457
1458                         if (pipe_ctx->surface != surface)
1459                                 continue;
1460
1461                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1462                 }
1463         }
1464 }
1465
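/* Translate a raw interrupt source id / extended id pair into a DC IRQ
 * source via the IRQ service.
 */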
1466 enum dc_irq_source dc_interrupt_to_irq_source(
1467                 struct dc *dc,
1468                 uint32_t src_id,
1469                 uint32_t ext_id)
1470 {
1471         struct core_dc *core_dc = DC_TO_CORE(dc);
1472         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1473 }
1474
1475 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1476 {
1477         struct core_dc *core_dc = DC_TO_CORE(dc);
1478         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1479 }
1480
1481 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1482 {
1483         struct core_dc *core_dc = DC_TO_CORE(dc);
1484         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1485 }
1486
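/* Handle an ACPI power state transition: D0 re-initializes the hardware;
 * any other state releases all streams, powers the hardware down and resets
 * the current context so resume starts clean.
 */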
1487 void dc_set_power_state(
1488         struct dc *dc,
1489         enum dc_acpi_cm_power_state power_state,
1490         enum dc_video_power_state video_power_state)
1491 {
1492         struct core_dc *core_dc = DC_TO_CORE(dc);
1493
1494         core_dc->previous_power_state = core_dc->current_power_state;
1495         core_dc->current_power_state = video_power_state;
1496
1497         switch (power_state) {
1498         case DC_ACPI_CM_POWER_STATE_D0:
1499                 core_dc->hwss.init_hw(core_dc);
1500                 break;
1501         default:
1502                 /* NULL means "reset/release all DC streams" */
1503                 dc_commit_streams(dc, NULL, 0);
1504
1505                 core_dc->hwss.power_down(core_dc);
1506
1507                 /* Zero out the current context so that on resume we start
1508                  * from a clean state and DC hardware programming
1509                  * optimizations are not applied against stale state.
1510                  */
1511                 memset(core_dc->current_context, 0,
1512                                 sizeof(*core_dc->current_context));
1513
1514                 core_dc->current_context->res_ctx.pool = core_dc->res_pool;
1515
1516                 break;
1517         }
1519 }
1520
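/* Resume every created link after a power state transition. */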
1521 void dc_resume(const struct dc *dc)
1522 {
1523         struct core_dc *core_dc = DC_TO_CORE(dc);
1524         uint32_t i;
1526
1527         for (i = 0; i < core_dc->link_count; i++)
1528                 core_link_resume(core_dc->links[i]);
1529 }
1530
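/* Read size bytes of DPCD data starting at address over the link's DDC
 * service. Returns true on success.
 */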
1531 bool dc_read_dpcd(
1532                 struct dc *dc,
1533                 uint32_t link_index,
1534                 uint32_t address,
1535                 uint8_t *data,
1536                 uint32_t size)
1537 {
1538         struct core_dc *core_dc = DC_TO_CORE(dc);
1539
1540         struct core_link *link = core_dc->links[link_index];
1541         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1542                         link->ddc,
1543                         address,
1544                         data,
1545                         size);
1546         return r == DDC_RESULT_SUCESSFULL;
1547 }
1548
1549 bool dc_query_ddc_data(
1550                 struct dc *dc,
1551                 uint32_t link_index,
1552                 uint32_t address,
1553                 uint8_t *write_buf,
1554                 uint32_t write_size,
1555                 uint8_t *read_buf,
1556                 uint32_t read_size)
1557 {
1558         struct core_dc *core_dc = DC_TO_CORE(dc);
1559
1560         struct core_link *link = core_dc->links[link_index];
1561
1562         bool result = dal_ddc_service_query_ddc_data(
1563                         link->ddc,
1564                         address,
1565                         write_buf,
1566                         write_size,
1567                         read_buf,
1568                         read_size);
1569
1570         return result;
1571 }
1572
1574 bool dc_write_dpcd(
1575                 struct dc *dc,
1576                 uint32_t link_index,
1577                 uint32_t address,
1578                 const uint8_t *data,
1579                 uint32_t size)
1580 {
1581         struct core_dc *core_dc = DC_TO_CORE(dc);
1582
1583         struct core_link *link = core_dc->links[link_index];
1584
1585         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1586                         link->ddc,
1587                         address,
1588                         data,
1589                         size);
1590         return r == DDC_RESULT_SUCESSFULL;
1591 }
1592
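/* Submit a raw I2C command on the DDC pin associated with the link. */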
1593 bool dc_submit_i2c(
1594                 struct dc *dc,
1595                 uint32_t link_index,
1596                 struct i2c_command *cmd)
1597 {
1598         struct core_dc *core_dc = DC_TO_CORE(dc);
1599
1600         struct core_link *link = core_dc->links[link_index];
1601         struct ddc_service *ddc = link->ddc;
1602
1603         return dal_i2caux_submit_i2c_command(
1604                 ddc->ctx->i2caux,
1605                 ddc->ddc_pin,
1606                 cmd);
1607 }
1608
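/* Take a reference on the sink and append it to the link's remote sink
 * array. Fails when MAX_SINKS_PER_LINK sinks are already attached.
 */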
1609 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1610 {
1611         struct dc_link *dc_link = &core_link->public;
1612
1613         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1614                 BREAK_TO_DEBUGGER();
1615                 return false;
1616         }
1617
1618         dc_sink_retain(sink);
1619
1620         dc_link->remote_sinks[dc_link->sink_count] = sink;
1621         dc_link->sink_count++;
1622
1623         return true;
1624 }
1625
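/* Create a sink from the supplied EDID, attach it to the link as a remote
 * sink and parse its EDID capabilities. Returns NULL on any failure, with
 * the sink released again if it was already attached.
 */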
1626 struct dc_sink *dc_link_add_remote_sink(
1627                 const struct dc_link *link,
1628                 const uint8_t *edid,
1629                 int len,
1630                 struct dc_sink_init_data *init_data)
1631 {
1632         struct dc_sink *dc_sink;
1633         enum dc_edid_status edid_status;
1634         struct core_link *core_link = DC_LINK_TO_LINK(link);
1635
1636         if (len > MAX_EDID_BUFFER_SIZE) {
1637                 dm_error("Max EDID buffer size exceeded!\n");
1638                 return NULL;
1639         }
1640
1641         if (!init_data) {
1642                 BREAK_TO_DEBUGGER();
1643                 return NULL;
1644         }
1645
1646         if (!init_data->link) {
1647                 BREAK_TO_DEBUGGER();
1648                 return NULL;
1649         }
1650
1651         dc_sink = dc_sink_create(init_data);
1652
1653         if (!dc_sink)
1654                 return NULL;
1655
1656         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1657         dc_sink->dc_edid.length = len;
1658
1659         if (!link_add_remote_sink_helper(
1660                         core_link,
1661                         dc_sink))
1662                 goto fail_add_sink;
1663
1664         edid_status = dm_helpers_parse_edid_caps(
1665                         core_link->ctx,
1666                         &dc_sink->dc_edid,
1667                         &dc_sink->edid_caps);
1668
1669         if (edid_status != EDID_OK)
1670                 goto fail;
1671
1672         return dc_sink;
1673 fail:
1674         dc_link_remove_remote_sink(link, dc_sink);
1675 fail_add_sink:
1676         dc_sink_release(dc_sink);
1677         return NULL;
1678 }
1679
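/* Set (or clear, when sink is NULL) the link's local sink and update the
 * connection type to match.
 */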
1680 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1681 {
1682         struct core_link *core_link = DC_LINK_TO_LINK(link);
1683         struct dc_link *dc_link = &core_link->public;
1684
1685         dc_link->local_sink = sink;
1686
1687         if (sink == NULL) {
1688                 dc_link->type = dc_connection_none;
1689         } else {
1690                 dc_link->type = dc_connection_single;
1691         }
1692 }
1693
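/* Release the given remote sink and compact the remote sink array so that
 * no gap is left behind.
 */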
1694 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1695 {
1696         int i;
1697         struct core_link *core_link = DC_LINK_TO_LINK(link);
1698         struct dc_link *dc_link = &core_link->public;
1699
1700         if (!link->sink_count) {
1701                 BREAK_TO_DEBUGGER();
1702                 return;
1703         }
1704
1705         for (i = 0; i < dc_link->sink_count; i++) {
1706                 if (dc_link->remote_sinks[i] == sink) {
1707                         dc_sink_release(sink);
1708                         dc_link->remote_sinks[i] = NULL;
1709
1710                         /* shrink the array to remove the empty slot */
1711                         while (i < dc_link->sink_count - 1) {
1712                                 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1713                                 i++;
1714                         }
1715
1716                         dc_link->sink_count--;
1717                         return;
1718                 }
1719         }
1720 }
1721