drm/amd/display: Switch to DRM helpers in s3.
drivers/gpu/drm/amd/display/dc/core/dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "bandwidth_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (NULL != dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
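/*
 * Create one core_link per physical connector reported by the VBIOS, then
 * append num_virtual_links virtual links, each backed by a virtual link
 * encoder, so virtual/headless display paths share the same link array.
 */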
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct core_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 link_init_params.connector_index = i;
101                 link_init_params.link_index = dc->link_count;
102                 link_init_params.dc = dc;
103                 link = link_create(&link_init_params);
104
105                 if (link) {
106                         dc->links[dc->link_count] = link;
107                         link->dc = dc;
108                         ++dc->link_count;
109                 } else {
110                         dm_error("DC: failed to create link!\n");
111                 }
112         }
113
114         for (i = 0; i < num_virtual_links; i++) {
115                 struct core_link *link = dm_alloc(sizeof(*link));
116                 struct encoder_init_data enc_init = {0};
117
118                 if (link == NULL) {
119                         BREAK_TO_DEBUGGER();
120                         goto failed_alloc;
121                 }
122
123                 link->ctx = dc->ctx;
124                 link->dc = dc;
125                 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128                 link->link_id.enum_id = ENUM_ID_1;
129                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
130
131                 enc_init.ctx = dc->ctx;
132                 enc_init.channel = CHANNEL_ID_UNKNOWN;
133                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135                 enc_init.connector = link->link_id;
136                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138                 enc_init.encoder.enum_id = ENUM_ID_1;
139                 virtual_link_encoder_construct(link->link_enc, &enc_init);
140
141                 link->public.link_index = dc->link_count;
142                 dc->links[dc->link_count] = link;
143                 dc->link_count++;
144         }
145
146         return true;
147
148 failed_alloc:
149         return false;
150 }
151
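/*
 * Program DRR (dynamic refresh rate) vertical min/max on every pipe that
 * drives the given stream, then rebuild and resend the info frame so the
 * sink sees the updated timing. Only the first stream in the array is
 * honored until multi-stream support is added (see the TODO below).
 */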
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153                 const struct dc_stream **stream, int num_streams,
154                 int vmin, int vmax)
155 {
156         /* TODO: Support multiple streams */
157         struct core_dc *core_dc = DC_TO_CORE(dc);
158         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
159         int i = 0;
160         bool ret = false;
161
162         for (i = 0; i < MAX_PIPES; i++) {
163                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
164
165                 if (pipe->stream == core_stream && pipe->stream_enc) {
166                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
167
168                         /* build and update the info frame */
169                         resource_build_info_frame(pipe);
170                         core_dc->hwss.update_info_frame(pipe);
171
172                         ret = true;
173                 }
174         }
175         return ret;
176 }
177
178
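/*
 * Re-apply the plane configuration for every pipe currently assigned to
 * the first stream in the array; the gamut remap state is expected to be
 * picked up as part of set_plane_config.
 */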
179 static bool set_gamut_remap(struct dc *dc,
180                         const struct dc_stream **stream, int num_streams)
181 {
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
184         int i = 0;
185         bool ret = false;
186         struct pipe_ctx *pipes;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
190                                 == core_stream) {
191
192                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
193                         core_dc->hwss.set_plane_config(core_dc, pipes,
194                                         &core_dc->current_context->res_ctx);
195                         ret = true;
196                 }
197         }
198
199         return ret;
200 }
201
202 /* This function is not expected to fail; proper implementation of
203  * validation will prevent it from ever being called for unsupported
204  * configurations.
205  */
206 static void stream_update_scaling(
207                 const struct dc *dc,
208                 const struct dc_stream *dc_stream,
209                 const struct rect *src,
210                 const struct rect *dst)
211 {
212         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
213         struct core_dc *core_dc = DC_TO_CORE(dc);
214         struct validate_context *cur_ctx = core_dc->current_context;
215         int i;
216
217         if (src)
218                 stream->public.src = *src;
219
220         if (dst)
221                 stream->public.dst = *dst;
222
223         for (i = 0; i < cur_ctx->stream_count; i++) {
224                 struct core_stream *cur_stream = cur_ctx->streams[i];
225
226                 if (stream == cur_stream) {
227                         struct dc_stream_status *status = &cur_ctx->stream_status[i];
228
229                         if (status->surface_count)
230                                 if (!dc_commit_surfaces_to_stream(
231                                                 &core_dc->public,
232                                                 status->surfaces,
233                                                 status->surface_count,
234                                                 &cur_stream->public))
235                                         /* Need to debug validation */
236                                         BREAK_TO_DEBUGGER();
237
238                         return;
239                 }
240         }
241 }
242
243 static bool set_psr_enable(struct dc *dc, bool enable)
244 {
245         struct core_dc *core_dc = DC_TO_CORE(dc);
246         int i;
247
248         for (i = 0; i < core_dc->link_count; i++)
249                 dc_link_set_psr_enable(&core_dc->links[i]->public,
250                                 enable);
251
252         return true;
253 }
254
255
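/*
 * Set up PSR (panel self refresh) on the link that owns the stream's sink,
 * then program static screen control on every non-underlay pipe driving
 * that stream, passing 0x182 through as the static screen control value
 * expected by the hw sequencer.
 */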
256 static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
257 {
258         struct core_dc *core_dc = DC_TO_CORE(dc);
259         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
260         struct pipe_ctx *pipes;
261         int i;
262         unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
263
264         for (i = 0; i < core_dc->link_count; i++) {
265                 if (core_stream->sink->link == core_dc->links[i])
266                         dc_link_setup_psr(&core_dc->links[i]->public,
267                                         stream);
268         }
269
270         for (i = 0; i < MAX_PIPES; i++) {
271                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
272                                 == core_stream && i != underlay_idx) {
273                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
274                         core_dc->hwss.set_static_screen_control(&pipes, 1,
275                                         0x182);
276                 }
277         }
278
279         return true;
280 }
281
282 static void set_drive_settings(struct dc *dc,
283                 struct link_training_settings *lt_settings,
284                 const struct dc_link *link)
285 {
286         struct core_dc *core_dc = DC_TO_CORE(dc);
287         int i;
288
289         for (i = 0; i < core_dc->link_count; i++) {
290                 if (&core_dc->links[i]->public == link)
291                         break;
292         }
293
294         if (i >= core_dc->link_count)
295                 ASSERT_CRITICAL(false);
296
297         dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
298 }
299
300 static void perform_link_training(struct dc *dc,
301                 struct dc_link_settings *link_setting,
302                 bool skip_video_pattern)
303 {
304         struct core_dc *core_dc = DC_TO_CORE(dc);
305         int i;
306
307         for (i = 0; i < core_dc->link_count; i++)
308                 dc_link_dp_perform_link_training(
309                         &core_dc->links[i]->public,
310                         link_setting,
311                         skip_video_pattern);
312 }
313
314 static void set_preferred_link_settings(struct dc *dc,
315                 struct dc_link_settings *link_setting,
316                 const struct dc_link *link)
317 {
318         struct core_link *core_link = DC_LINK_TO_CORE(link);
319
320         core_link->public.verified_link_cap.lane_count =
321                                 link_setting->lane_count;
322         core_link->public.verified_link_cap.link_rate =
323                                 link_setting->link_rate;
324         dp_retrain_link_dp_test(core_link, link_setting, false);
325 }
326
327 static void enable_hpd(const struct dc_link *link)
328 {
329         dc_link_dp_enable_hpd(link);
330 }
331
332 static void disable_hpd(const struct dc_link *link)
333 {
334         dc_link_dp_disable_hpd(link);
335 }
336
337
338 static void set_test_pattern(
339                 const struct dc_link *link,
340                 enum dp_test_pattern test_pattern,
341                 const struct link_training_settings *p_link_settings,
342                 const unsigned char *p_custom_pattern,
343                 unsigned int cust_pattern_size)
344 {
345         if (link != NULL)
346                 dc_link_dp_set_test_pattern(
347                         link,
348                         test_pattern,
349                         p_link_settings,
350                         p_custom_pattern,
351                         cust_pattern_size);
352 }
353
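/*
 * Wire the public stream/link function pointers to the static helpers
 * above. adjust_vmin_vmax is only exposed when the hw sequencer provides
 * set_drr, since DRR cannot be programmed without it.
 */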
354 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
355 {
356         core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
357         if (core_dc->hwss.set_drr != NULL) {
358                 core_dc->public.stream_funcs.adjust_vmin_vmax =
359                                 stream_adjust_vmin_vmax;
360         }
361
362         core_dc->public.stream_funcs.set_gamut_remap =
363                         set_gamut_remap;
364
365         core_dc->public.stream_funcs.set_psr_enable =
366                         set_psr_enable;
367
368         core_dc->public.stream_funcs.setup_psr =
369                         setup_psr;
370
371         core_dc->public.link_funcs.set_drive_settings =
372                         set_drive_settings;
373
374         core_dc->public.link_funcs.perform_link_training =
375                         perform_link_training;
376
377         core_dc->public.link_funcs.set_preferred_link_settings =
378                         set_preferred_link_settings;
379
380         core_dc->public.link_funcs.enable_hpd =
381                         enable_hpd;
382
383         core_dc->public.link_funcs.disable_hpd =
384                         disable_hpd;
385
386         core_dc->public.link_funcs.set_test_pattern =
387                         set_test_pattern;
388 }
389
390 static void destruct(struct core_dc *dc)
391 {
392         resource_validate_ctx_destruct(dc->current_context);
393
394         destroy_links(dc);
395
396         dc_destroy_resource_pool(dc);
397
398         if (dc->ctx->gpio_service)
399                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
400
401         if (dc->ctx->i2caux)
402                 dal_i2caux_destroy(&dc->ctx->i2caux);
403
404         if (dc->ctx->created_bios)
405                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
406
407         if (dc->ctx->logger)
408                 dal_logger_destroy(&dc->ctx->logger);
409
410         dm_free(dc->current_context);
411         dc->current_context = NULL;
412         dm_free(dc->temp_flip_context);
413         dc->temp_flip_context = NULL;
414         dm_free(dc->scratch_val_ctx);
415         dc->scratch_val_ctx = NULL;
416
417         dm_free(dc->ctx);
418         dc->ctx = NULL;
419 }
420
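/*
 * One-time construction of the core_dc: allocate the dc_context and the
 * three validate_context scratch areas, then create the logger, BIOS
 * parser (unless a vbios override is supplied), I2C AUX, GPIO service,
 * resource pool and links. On any failure destruct() tears down whatever
 * was already created.
 */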
421 static bool construct(struct core_dc *dc,
422                 const struct dc_init_data *init_params)
423 {
424         struct dal_logger *logger;
425         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
426         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
427
428         if (!dc_ctx) {
429                 dm_error("%s: failed to create ctx\n", __func__);
430                 goto ctx_fail;
431         }
432
433         dc->current_context = dm_alloc(sizeof(*dc->current_context));
434         dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
435         dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));
436
437         if (!dc->current_context || !dc->temp_flip_context || !dc->scratch_val_ctx) {
438                 dm_error("%s: failed to create validate ctx\n", __func__);
439                 goto val_ctx_fail;
440         }
441
442         dc_ctx->cgs_device = init_params->cgs_device;
443         dc_ctx->driver_context = init_params->driver;
444         dc_ctx->dc = &dc->public;
445         dc_ctx->asic_id = init_params->asic_id;
446
447         /* Create logger */
448         logger = dal_logger_create(dc_ctx);
449
450         if (!logger) {
451                 /* cannot use the logger yet; call the base driver's 'print error' */
452                 dm_error("%s: failed to create Logger!\n", __func__);
453                 goto logger_fail;
454         }
455         dc_ctx->logger = logger;
456         dc->ctx = dc_ctx;
457         dc->ctx->dce_environment = init_params->dce_environment;
458
459         dc_version = resource_parse_asic_id(init_params->asic_id);
460         dc->ctx->dce_version = dc_version;
461
462         /* Resource should construct all asic specific resources.
463          * This should be the only place where we need to parse the asic id
464          */
465         if (init_params->vbios_override)
466                 dc_ctx->dc_bios = init_params->vbios_override;
467         else {
468                 /* Create BIOS parser */
469                 struct bp_init_data bp_init_data;
470
471                 bp_init_data.ctx = dc_ctx;
472                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
473
474                 dc_ctx->dc_bios = dal_bios_parser_create(
475                                 &bp_init_data, dc_version);
476
477                 if (!dc_ctx->dc_bios) {
478                         ASSERT_CRITICAL(false);
479                         goto bios_fail;
480                 }
481
482                 dc_ctx->created_bios = true;
483         }
484
485         /* Create I2C AUX */
486         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
487
488         if (!dc_ctx->i2caux) {
489                 ASSERT_CRITICAL(false);
490                 goto failed_to_create_i2caux;
491         }
492
493         /* Create GPIO service */
494         dc_ctx->gpio_service = dal_gpio_service_create(
495                         dc_version,
496                         dc_ctx->dce_environment,
497                         dc_ctx);
498
499         if (!dc_ctx->gpio_service) {
500                 ASSERT_CRITICAL(false);
501                 goto gpio_fail;
502         }
503
504         dc->res_pool = dc_create_resource_pool(
505                         dc,
506                         init_params->num_virtual_links,
507                         dc_version,
508                         init_params->asic_id);
509         if (!dc->res_pool)
510                 goto create_resource_fail;
511
512         if (!create_links(dc, init_params->num_virtual_links))
513                 goto create_links_fail;
514
515         allocate_dc_stream_funcs(dc);
516
517         return true;
518
519         /**** error handling here ****/
520 create_links_fail:
521 create_resource_fail:
522 gpio_fail:
523 failed_to_create_i2caux:
524 bios_fail:
525 logger_fail:
526 val_ctx_fail:
527 ctx_fail:
528         destruct(dc);
529         return false;
530 }
531
532 /*
533 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
534 {
535         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
536         unsigned int pixDurationInPico = round(pixel_duration);
537
538         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
539
540         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
541         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
542         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
543
544         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
545         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
546         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
547
548         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
549         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
550
551         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
552         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
553 }
554 */
555
556 /*******************************************************************************
557  * Public functions
558  ******************************************************************************/
559
560 struct dc *dc_create(const struct dc_init_data *init_params)
561 {
562         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
563         unsigned int full_pipe_count;
564
565         if (NULL == core_dc)
566                 goto alloc_fail;
567
568         if (false == construct(core_dc, init_params))
569                 goto construct_fail;
570
571         /*TODO: separate HW and SW initialization*/
572         core_dc->hwss.init_hw(core_dc);
573
574         full_pipe_count = core_dc->res_pool->pipe_count;
575         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
576                 full_pipe_count--;
577         core_dc->public.caps.max_streams = min(
578                         full_pipe_count,
579                         core_dc->res_pool->stream_enc_count);
580
581         core_dc->public.caps.max_links = core_dc->link_count;
582         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
583
584         core_dc->public.config = init_params->flags;
585
586         dm_logger_write(core_dc->ctx->logger, LOG_DC,
587                         "Display Core initialized\n");
588
589
590         /* TODO: missing feature to be enabled */
591         core_dc->public.debug.disable_dfs_bypass = true;
592
593         return &core_dc->public;
594
595 construct_fail:
596         dm_free(core_dc);
597
598 alloc_fail:
599         return NULL;
600 }
601
602 void dc_destroy(struct dc **dc)
603 {
604         struct core_dc *core_dc = DC_TO_CORE(*dc);
605         destruct(core_dc);
606         dm_free(core_dc);
607         *dc = NULL;
608 }
609
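/*
 * Quick check against the current context to decide whether a full
 * revalidation is needed: stream count, per-stream surface count, stream
 * timing, and every surface field other than the clip rect and the dst
 * rect offsets must be unchanged for validation to be skipped.
 */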
610 static bool is_validation_required(
611                 const struct core_dc *dc,
612                 const struct dc_validation_set set[],
613                 int set_count)
614 {
615         const struct validate_context *context = dc->current_context;
616         int i, j;
617
618         if (context->stream_count != set_count)
619                 return true;
620
621         for (i = 0; i < set_count; i++) {
622
623                 if (set[i].surface_count != context->stream_status[i].surface_count)
624                         return true;
625                 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
626                         return true;
627
628                 for (j = 0; j < set[i].surface_count; j++) {
629                         struct dc_surface temp_surf = { 0 };
630
631                         temp_surf = *context->stream_status[i].surfaces[j];
632                         temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
633                         temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
634                         temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
635
636                         if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
637                                 return true;
638                 }
639         }
640
641         return false;
642 }
643
644 bool dc_validate_resources(
645                 const struct dc *dc,
646                 const struct dc_validation_set set[],
647                 uint8_t set_count)
648 {
649         struct core_dc *core_dc = DC_TO_CORE(dc);
650         enum dc_status result = DC_ERROR_UNEXPECTED;
651         struct validate_context *context;
652
653         if (!is_validation_required(core_dc, set, set_count))
654                 return true;
655
656         context = dm_alloc(sizeof(struct validate_context));
657         if (context == NULL)
658                 goto context_alloc_fail;
659
660         result = core_dc->res_pool->funcs->validate_with_context(
661                                                 core_dc, set, set_count, context);
662
663         resource_validate_ctx_destruct(context);
664         dm_free(context);
665
666 context_alloc_fail:
667         if (result != DC_OK) {
668                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
669                                 "%s:resource validation failed, dc_status:%d\n",
670                                 __func__,
671                                 result);
672         }
673
674         return (result == DC_OK);
675
676 }
677
678 bool dc_validate_guaranteed(
679                 const struct dc *dc,
680                 const struct dc_stream *stream)
681 {
682         struct core_dc *core_dc = DC_TO_CORE(dc);
683         enum dc_status result = DC_ERROR_UNEXPECTED;
684         struct validate_context *context;
685
686         context = dm_alloc(sizeof(struct validate_context));
687         if (context == NULL)
688                 goto context_alloc_fail;
689
690         result = core_dc->res_pool->funcs->validate_guaranteed(
691                                         core_dc, stream, context);
692
693         resource_validate_ctx_destruct(context);
694         dm_free(context);
695
696 context_alloc_fail:
697         if (result != DC_OK) {
698                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
699                         "%s:guaranteed validation failed, dc_status:%d\n",
700                         __func__,
701                         result);
702         }
703
704         return (result == DC_OK);
705 }
706
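/*
 * Group top-level pipes whose streams have synchronizable timings and let
 * the hw sequencer synchronize each group. An unblanked pipe is moved to
 * slot 0 of the group so it becomes the master, and any other pipe that is
 * already unblanked is dropped from the group since it is treated as
 * already synced.
 */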
707 static void program_timing_sync(
708                 struct core_dc *core_dc,
709                 struct validate_context *ctx)
710 {
711         int i, j;
712         int group_index = 0;
713         int pipe_count = ctx->res_ctx.pool->pipe_count;
714         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
715
716         for (i = 0; i < pipe_count; i++) {
717                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
718                         continue;
719
720                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
721         }
722
723         for (i = 0; i < pipe_count; i++) {
724                 int group_size = 1;
725                 struct pipe_ctx *pipe_set[MAX_PIPES];
726
727                 if (!unsynced_pipes[i])
728                         continue;
729
730                 pipe_set[0] = unsynced_pipes[i];
731                 unsynced_pipes[i] = NULL;
732
733                 /* Add this tg to the set, then search the remaining tgs for
734                  * ones with the same timing; add every matching tg to the group.
735                  */
736                 for (j = i + 1; j < pipe_count; j++) {
737                         if (!unsynced_pipes[j])
738                                 continue;
739
740                         if (resource_are_streams_timing_synchronizable(
741                                         unsynced_pipes[j]->stream,
742                                         pipe_set[0]->stream)) {
743                                 pipe_set[group_size] = unsynced_pipes[j];
744                                 unsynced_pipes[j] = NULL;
745                                 group_size++;
746                         }
747                 }
748
749                 /* set first unblanked pipe as master */
750                 for (j = 0; j < group_size; j++) {
751                         struct pipe_ctx *temp;
752
753                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
754                                 if (j == 0)
755                                         break;
756
757                                 temp = pipe_set[0];
758                                 pipe_set[0] = pipe_set[j];
759                                 pipe_set[j] = temp;
760                                 break;
761                         }
762                 }
763
764                 /* remove any other unblanked pipes as they have already been synced */
765                 for (j = j + 1; j < group_size; j++) {
766                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
767                                 group_size--;
768                                 pipe_set[j] = pipe_set[group_size];
769                                 j--;
770                         }
771                 }
772
773                 if (group_size > 1) {
774                         core_dc->hwss.enable_timing_synchronization(
775                                 core_dc, group_index, group_size, pipe_set);
776                         group_index++;
777                 }
778         }
779 }
780
781 static bool streams_changed(
782                 struct core_dc *dc,
783                 const struct dc_stream *streams[],
784                 uint8_t stream_count)
785 {
786         uint8_t i;
787
788         if (stream_count != dc->current_context->stream_count)
789                 return true;
790
791         for (i = 0; i < dc->current_context->stream_count; i++) {
792                 if (&dc->current_context->streams[i]->public != streams[i])
793                         return true;
794         }
795
796         return false;
797 }
798
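/*
 * Full mode-set path: validate the requested set of streams (carrying over
 * any surfaces already attached to them), apply the new context to hw,
 * re-sync timings, reprogram the attached surfaces, then swap the new
 * validate_context in as current_context.
 */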
799 bool dc_commit_streams(
800         struct dc *dc,
801         const struct dc_stream *streams[],
802         uint8_t stream_count)
803 {
804         struct core_dc *core_dc = DC_TO_CORE(dc);
805         struct dc_bios *dcb = core_dc->ctx->dc_bios;
806         enum dc_status result = DC_ERROR_UNEXPECTED;
807         struct validate_context *context;
808         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
809         int i, j;
810
811         if (false == streams_changed(core_dc, streams, stream_count))
812                 return true;
813
814         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
815                                 __func__, stream_count);
816
817         for (i = 0; i < stream_count; i++) {
818                 const struct dc_stream *stream = streams[i];
819                 const struct dc_stream_status *status = dc_stream_get_status(stream);
820                 int j;
821
822                 dc_stream_log(stream,
823                                 core_dc->ctx->logger,
824                                 LOG_DC);
825
826                 set[i].stream = stream;
827
828                 if (status) {
829                         set[i].surface_count = status->surface_count;
830                         for (j = 0; j < status->surface_count; j++)
831                                 set[i].surfaces[j] = status->surfaces[j];
832                 }
833
834         }
835
836         context = dm_alloc(sizeof(struct validate_context));
837         if (context == NULL)
838                 goto context_alloc_fail;
839
840         result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
841         if (result != DC_OK) {
842                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
843                                         "%s: Context validation failed! dc_status:%d\n",
844                                         __func__,
845                                         result);
846                 BREAK_TO_DEBUGGER();
847                 resource_validate_ctx_destruct(context);
848                 goto fail;
849         }
850
851         if (!dcb->funcs->is_accelerated_mode(dcb)) {
852                 core_dc->hwss.enable_accelerated_mode(core_dc);
853         }
854
855         if (result == DC_OK) {
856                 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
857         }
858
859         program_timing_sync(core_dc, context);
860
861         for (i = 0; i < context->stream_count; i++) {
862                 const struct core_sink *sink = context->streams[i]->sink;
863
864                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
865                         struct core_surface *surface =
866                                         DC_SURFACE_TO_CORE(context->stream_status[i].surfaces[j]);
867
868                         core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
869                 }
870
871                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
872                                 context->streams[i]->public.timing.h_addressable,
873                                 context->streams[i]->public.timing.v_addressable,
874                                 context->streams[i]->public.timing.h_total,
875                                 context->streams[i]->public.timing.v_total,
876                                 context->streams[i]->public.timing.pix_clk_khz);
877         }
878
879         resource_validate_ctx_destruct(core_dc->current_context);
880
881         if (core_dc->temp_flip_context != core_dc->current_context) {
882                 dm_free(core_dc->temp_flip_context);
883                 core_dc->temp_flip_context = core_dc->current_context;
884         }
885         core_dc->current_context = context;
886         memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));
887
888         return (result == DC_OK);
889
890 fail:
891         dm_free(context);
892
893 context_alloc_fail:
894         return (result == DC_OK);
895 }
896
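/*
 * Prepare for a surface update on an already-committed stream: if nothing
 * other than the clip rect and dst rect offsets changed, the pre-update
 * work is skipped. Otherwise a copy of the current context is built with
 * the new surfaces attached, bandwidth is revalidated, and the affected
 * pipes are prepared for the new context.
 */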
897 bool dc_pre_update_surfaces_to_stream(
898                 struct dc *dc,
899                 const struct dc_surface *const *new_surfaces,
900                 uint8_t new_surface_count,
901                 const struct dc_stream *dc_stream)
902 {
903         int i, j;
904         struct core_dc *core_dc = DC_TO_CORE(dc);
905         struct dc_stream_status *stream_status = NULL;
906         struct validate_context *context;
907         bool ret = true;
908
909         pre_surface_trace(dc, new_surfaces, new_surface_count);
910
911         if (core_dc->current_context->stream_count == 0)
912                 return false;
913
914         /* Cannot commit surface to a stream that is not committed */
915         for (i = 0; i < core_dc->current_context->stream_count; i++)
916                 if (dc_stream == &core_dc->current_context->streams[i]->public)
917                         break;
918
919         if (i == core_dc->current_context->stream_count)
920                 return false;
921
922         stream_status = &core_dc->current_context->stream_status[i];
923
924         if (new_surface_count == stream_status->surface_count) {
925                 bool skip_pre = true;
926
927                 for (i = 0; i < stream_status->surface_count; i++) {
928                         struct dc_surface temp_surf = { 0 };
929
930                         temp_surf = *stream_status->surfaces[i];
931                         temp_surf.clip_rect = new_surfaces[i]->clip_rect;
932                         temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
933                         temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
934
935                         if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
936                                 skip_pre = false;
937                                 break;
938                         }
939                 }
940
941                 if (skip_pre)
942                         return true;
943         }
944
945         context = dm_alloc(sizeof(struct validate_context));
946
947         if (!context) {
948                 dm_error("%s: failed to create validate ctx\n", __func__);
949                 ret = false;
950                 goto val_ctx_fail;
951         }
952
953         resource_validate_ctx_copy_construct(core_dc->current_context, context);
954
955         dm_logger_write(core_dc->ctx->logger, LOG_DC,
956                                 "%s: commit %d surfaces to stream 0x%x\n",
957                                 __func__,
958                                 new_surface_count,
959                                 dc_stream);
960
961         if (!resource_attach_surfaces_to_context(
962                         new_surfaces, new_surface_count, dc_stream, context)) {
963                 BREAK_TO_DEBUGGER();
964                 ret = false;
965                 goto unexpected_fail;
966         }
967
968         for (i = 0; i < new_surface_count; i++)
969                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
970                         if (context->res_ctx.pipe_ctx[j].surface !=
971                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
972                                 continue;
973
974                         resource_build_scaling_params(
975                                 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
976                 }
977
978         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
979                 BREAK_TO_DEBUGGER();
980                 ret = false;
981                 goto unexpected_fail;
982         }
983
984         core_dc->hwss.set_bandwidth(core_dc, context, false);
985
986         for (i = 0; i < new_surface_count; i++)
987                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
988                         if (context->res_ctx.pipe_ctx[j].surface !=
989                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
990                                 continue;
991
992                         core_dc->hwss.prepare_pipe_for_context(
993                                         core_dc,
994                                         &context->res_ctx.pipe_ctx[j],
995                                         context);
996                 }
997
998 unexpected_fail:
999         resource_validate_ctx_destruct(context);
1000         dm_free(context);
1001 val_ctx_fail:
1002
1003         return ret;
1004 }
1005
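/*
 * Finish a surface update: power down any front end that no longer has a
 * stream, revalidate bandwidth on a copy of the current context, apply the
 * resulting bandwidth settings, and promote the copy to current_context.
 */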
1006 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1007 {
1008         int i;
1009         struct core_dc *core_dc = DC_TO_CORE(dc);
1010         struct validate_context *context = dm_alloc(sizeof(struct validate_context));
1011
1012         if (!context) {
1013                 dm_error("%s: failed to create validate ctx\n", __func__);
1014                 return false;
1015         }
1016         resource_validate_ctx_copy_construct(core_dc->current_context, context);
1017
1018         post_surface_trace(dc);
1019
1020         for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
1021                 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1022                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
1023                         core_dc->hwss.power_down_front_end(
1024                                         core_dc, &context->res_ctx.pipe_ctx[i]);
1025                 }
1026         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1027                 BREAK_TO_DEBUGGER();
1028                 return false;
1029         }
1030
1031         core_dc->hwss.set_bandwidth(core_dc, context, true);
1032
1033         resource_validate_ctx_destruct(core_dc->current_context);
1034         if (core_dc->current_context)
1035                 dm_free(core_dc->current_context);
1036
1037         core_dc->current_context = context;
1038
1039         return true;
1040 }
1041
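/*
 * Convenience wrapper: expand the raw surface list into dc_surface_update
 * descriptors (flip address, plane info, scaling info) and run the full
 * pre-update / update / post-update sequence.
 *
 * A caller (the DM layer, for instance) would use it roughly like this;
 * primary_surface and stream stand in for whatever the caller holds:
 *
 *      const struct dc_surface *surfaces[] = { primary_surface };
 *
 *      if (!dc_commit_surfaces_to_stream(dc, surfaces, 1, stream))
 *              DRM_ERROR("failed to commit surfaces to stream\n");
 */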
1042 bool dc_commit_surfaces_to_stream(
1043                 struct dc *dc,
1044                 const struct dc_surface **new_surfaces,
1045                 uint8_t new_surface_count,
1046                 const struct dc_stream *dc_stream)
1047 {
1048         struct dc_surface_update updates[MAX_SURFACES];
1049         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1050         struct dc_plane_info plane_info[MAX_SURFACES];
1051         struct dc_scaling_info scaling_info[MAX_SURFACES];
1052         int i;
1053
1054         if (!dc_pre_update_surfaces_to_stream(
1055                         dc, new_surfaces, new_surface_count, dc_stream))
1056                 return false;
1057
1058         memset(updates, 0, sizeof(updates));
1059         memset(flip_addr, 0, sizeof(flip_addr));
1060         memset(plane_info, 0, sizeof(plane_info));
1061         memset(scaling_info, 0, sizeof(scaling_info));
1062
1063         for (i = 0; i < new_surface_count; i++) {
1064                 updates[i].surface = new_surfaces[i];
1065                 updates[i].gamma =
1066                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1067                 flip_addr[i].address = new_surfaces[i]->address;
1068                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1069                 plane_info[i].color_space = new_surfaces[i]->color_space;
1070                 plane_info[i].format = new_surfaces[i]->format;
1071                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1072                 plane_info[i].rotation = new_surfaces[i]->rotation;
1073                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1074                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1075                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1076                 plane_info[i].visible = new_surfaces[i]->visible;
1077                 plane_info[i].dcc = new_surfaces[i]->dcc;
1078                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1079                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1080                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1081                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1082
1083                 updates[i].flip_addr = &flip_addr[i];
1084                 updates[i].plane_info = &plane_info[i];
1085                 updates[i].scaling_info = &scaling_info[i];
1086         }
1087         dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1088
1089         return dc_post_update_surfaces_to_stream(dc);
1090 }
1091
1092 static bool is_surface_in_context(
1093                 const struct validate_context *context,
1094                 const struct dc_surface *surface)
1095 {
1096         int j;
1097
1098         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1099                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1100
1101                 if (surface == &pipe_ctx->surface->public) {
1102                         return true;
1103                 }
1104         }
1105
1106         return false;
1107 }
1108
1109 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1110 {
1111         switch (format) {
1112         case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1113         case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1114                 return 16;
1115         case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1116         case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1117         case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1118         case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1119                 return 32;
1120         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1121         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1122         case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1123                 return 64;
1124         default:
1125                 ASSERT_CRITICAL(false);
1126                 return -1;
1127         }
1128 }
1129
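/*
 * Classify a plane_info update: if any field that requires a full update
 * differs from the current surface state, return UPDATE_TYPE_FULL;
 * otherwise a format change is tolerated as long as the bits per pixel
 * stay the same, which (like no plane_info change at all) counts as
 * UPDATE_TYPE_MED.
 */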
1130 static enum surface_update_type get_plane_info_update_type(
1131                 const struct dc_surface_update *u)
1132 {
1133         struct dc_plane_info temp_plane_info = { { { { 0 } } } };
1134
1135         if (!u->plane_info)
1136                 return UPDATE_TYPE_FAST;
1137
1138         /* Copy all parameters that would cause a full update from the
1139          * current surface, and the remaining parameters from the provided
1140          * plane configuration.
1141          * Then do a memory compare, with special validation for the
1142          * parameters that can cause fast/medium updates.
1143          */
1144
1145         /* Full update parameters */
1146         temp_plane_info.color_space = u->surface->color_space;
1147         temp_plane_info.dcc = u->surface->dcc;
1148         temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1149         temp_plane_info.plane_size = u->surface->plane_size;
1150         temp_plane_info.rotation = u->surface->rotation;
1151         temp_plane_info.stereo_format = u->surface->stereo_format;
1152         temp_plane_info.tiling_info = u->surface->tiling_info;
1153         temp_plane_info.visible = u->surface->visible;
1154
1155         /* Special Validation parameters */
1156         temp_plane_info.format = u->plane_info->format;
1157
1158         if (memcmp(u->plane_info, &temp_plane_info,
1159                         sizeof(struct dc_plane_info)) != 0)
1160                 return UPDATE_TYPE_FULL;
1161
1162         if (pixel_format_to_bpp(u->plane_info->format) !=
1163                         pixel_format_to_bpp(u->surface->format)) {
1164                 return UPDATE_TYPE_FULL;
1165         } else {
1166                 return UPDATE_TYPE_MED;
1167         }
1168 }
1169
1170 static enum surface_update_type get_scaling_info_update_type(
1171                 const struct dc_surface_update *u)
1172 {
1173         struct dc_scaling_info temp_scaling_info = { { 0 } };
1174
1175         if (!u->scaling_info)
1176                 return UPDATE_TYPE_FAST;
1177
1178         /* Copy all parameters that would cause a full update from the
1179          * current surface, and the remaining parameters from the provided
1180          * scaling configuration.
1181          * Then do a memory compare, with special validation for the
1182          * parameters that can cause fast/medium updates.
1183          */
1184
1185         /* Full Update Parameters */
1186         temp_scaling_info.dst_rect = u->surface->dst_rect;
1187         temp_scaling_info.src_rect = u->surface->src_rect;
1188         temp_scaling_info.scaling_quality = u->surface->scaling_quality;
1189
1190         /* Special validation required */
1191         temp_scaling_info.clip_rect = u->scaling_info->clip_rect;
1192
1193         if (memcmp(u->scaling_info, &temp_scaling_info,
1194                         sizeof(struct dc_scaling_info)) != 0)
1195                 return UPDATE_TYPE_FULL;
1196
1197         /* Check clip rectangles if not equal:
1198          * difference only in offsets   => UPDATE_TYPE_FAST
1199          * difference in dimensions     => UPDATE_TYPE_FULL
1200          */
1201         if (memcmp(&u->scaling_info->clip_rect,
1202                         &u->surface->clip_rect, sizeof(struct rect)) != 0) {
1203                 if ((u->scaling_info->clip_rect.height ==
1204                         u->surface->clip_rect.height) &&
1205                         (u->scaling_info->clip_rect.width ==
1206                         u->surface->clip_rect.width)) {
1207                         return UPDATE_TYPE_FAST;
1208                 } else {
1209                         return UPDATE_TYPE_FULL;
1210                 }
1211         }
1212
1213         return UPDATE_TYPE_FAST;
1214 }
1215
1216 static enum surface_update_type det_surface_update(
1217                 const struct core_dc *dc,
1218                 const struct dc_surface_update *u)
1219 {
1220         const struct validate_context *context = dc->current_context;
1221         enum surface_update_type type = UPDATE_TYPE_FAST;
1222         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1223
1224         if (!is_surface_in_context(context, u->surface))
1225                 return UPDATE_TYPE_FULL;
1226
1227         type = get_plane_info_update_type(u);
1228         if (overall_type < type)
1229                 overall_type = type;
1230
1231         type = get_scaling_info_update_type(u);
1232         if (overall_type < type)
1233                 overall_type = type;
1234
1235         if (u->in_transfer_func ||
1236                 u->out_transfer_func ||
1237                 u->hdr_static_metadata) {
1238                 if (overall_type < UPDATE_TYPE_MED)
1239                         overall_type = UPDATE_TYPE_MED;
1240         }
1241
1242         return overall_type;
1243 }
1244
1245 enum surface_update_type dc_check_update_surfaces_for_stream(
1246                 struct dc *dc,
1247                 struct dc_surface_update *updates,
1248                 int surface_count,
1249                 const struct dc_stream_status *stream_status)
1250 {
1251         struct core_dc *core_dc = DC_TO_CORE(dc);
1252         int i;
1253         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1254
1255         if (stream_status->surface_count != surface_count)
1256                 return UPDATE_TYPE_FULL;
1257
1258         for (i = 0 ; i < surface_count; i++) {
1259                 enum surface_update_type type =
1260                                 det_surface_update(core_dc, &updates[i]);
1261
1262                 if (type == UPDATE_TYPE_FULL)
1263                         return type;
1264
1265                 if (overall_type < type)
1266                         overall_type = type;
1267         }
1268
1269         return overall_type;
1270 }
1271
1272 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1273
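/*
 * Apply a set of per-surface updates to a committed stream. The detected
 * update type decides how much work is done: FULL rebuilds a context in
 * the temp_flip_context scratch area and revalidates bandwidth, MED
 * re-applies the context for the surface, FAST only flips addresses. The
 * updated parameters are also copied back into the public surface state.
 */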
1274 void dc_update_surfaces_for_stream(struct dc *dc,
1275                 struct dc_surface_update *updates, int surface_count,
1276                 const struct dc_stream *dc_stream)
1277 {
1278         struct core_dc *core_dc = DC_TO_CORE(dc);
1279         struct validate_context *context;
1280         int i, j;
1281
1282         enum surface_update_type update_type;
1283         const struct dc_stream_status *stream_status;
1284
1285         stream_status = dc_stream_get_status(dc_stream);
1286         ASSERT(stream_status);
1287         if (!stream_status)
1288                 return; /* Cannot commit surface to stream that is not committed */
1289
1290         update_type = dc_check_update_surfaces_for_stream(
1291                         dc, updates, surface_count, stream_status);
1292
1293         if (update_type >= update_surface_trace_level)
1294                 update_surface_trace(dc, updates, surface_count);
1295
1296         if (update_type >= UPDATE_TYPE_FULL) {
1297                 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1298
1299                 for (i = 0; i < surface_count; i++)
1300                         new_surfaces[i] = updates[i].surface;
1301
1302                 /* initialize scratch memory for building context */
1303                 context = core_dc->temp_flip_context;
1304                 resource_validate_ctx_copy_construct(
1305                                 core_dc->current_context, context);
1306
1307                 /* add surface to context */
1308                 if (!resource_attach_surfaces_to_context(
1309                                 new_surfaces, surface_count, dc_stream, context)) {
1310                         BREAK_TO_DEBUGGER();
1311                         return;
1312                 }
1313         } else {
1314                 context = core_dc->current_context;
1315         }
1316         for (i = 0; i < surface_count; i++) {
1317                 /* save update param into surface */
1318                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1319                 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1320
1321                 if (updates[i].flip_addr) {
1322                         surface->public.address = updates[i].flip_addr->address;
1323                         surface->public.flip_immediate =
1324                                         updates[i].flip_addr->flip_immediate;
1325                 }
1326
1327                 if (updates[i].scaling_info) {
1328                         surface->public.scaling_quality =
1329                                         updates[i].scaling_info->scaling_quality;
1330                         surface->public.dst_rect =
1331                                         updates[i].scaling_info->dst_rect;
1332                         surface->public.src_rect =
1333                                         updates[i].scaling_info->src_rect;
1334                         surface->public.clip_rect =
1335                                         updates[i].scaling_info->clip_rect;
1336                 }
1337
1338                 if (updates[i].plane_info) {
1339                         surface->public.color_space =
1340                                         updates[i].plane_info->color_space;
1341                         surface->public.format =
1342                                         updates[i].plane_info->format;
1343                         surface->public.plane_size =
1344                                         updates[i].plane_info->plane_size;
1345                         surface->public.rotation =
1346                                         updates[i].plane_info->rotation;
1347                         surface->public.horizontal_mirror =
1348                                         updates[i].plane_info->horizontal_mirror;
1349                         surface->public.stereo_format =
1350                                         updates[i].plane_info->stereo_format;
1351                         surface->public.tiling_info =
1352                                         updates[i].plane_info->tiling_info;
1353                         surface->public.visible =
1354                                         updates[i].plane_info->visible;
1355                         surface->public.dcc =
1356                                         updates[i].plane_info->dcc;
1357                 }
1358
1359                 /* not sure if we still need this */
1360                 if (update_type == UPDATE_TYPE_FULL) {
1361                         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1362                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1363
1364                                 if (pipe_ctx->surface != surface)
1365                                         continue;
1366
1367                                 resource_build_scaling_params(updates[i].surface, pipe_ctx);
1368                         }
1369                 }
1370
1371                 if (updates[i].gamma &&
1372                         updates[i].gamma != surface->public.gamma_correction) {
1373                         if (surface->public.gamma_correction != NULL)
1374                                 dc_gamma_release(&surface->public.
1375                                                 gamma_correction);
1376
1377                         dc_gamma_retain(updates[i].gamma);
1378                         surface->public.gamma_correction =
1379                                                 updates[i].gamma;
1380                 }
1381
1382                 if (updates[i].in_transfer_func &&
1383                         updates[i].in_transfer_func != surface->public.in_transfer_func) {
1384                         if (surface->public.in_transfer_func != NULL)
1385                                 dc_transfer_func_release(
1386                                                 surface->public.
1387                                                 in_transfer_func);
1388
1389                         dc_transfer_func_retain(
1390                                         updates[i].in_transfer_func);
1391                         surface->public.in_transfer_func =
1392                                         updates[i].in_transfer_func;
1393                 }
1394
1395                 if (updates[i].out_transfer_func &&
1396                         updates[i].out_transfer_func != dc_stream->out_transfer_func) {
1397                         if (dc_stream->out_transfer_func != NULL)
1398                                 dc_transfer_func_release(dc_stream->out_transfer_func);
1399                         dc_transfer_func_retain(updates[i].out_transfer_func);
1400                         stream->public.out_transfer_func = updates[i].out_transfer_func;
1401                 }
1402                 if (updates[i].hdr_static_metadata)
1403                         surface->public.hdr_static_ctx =
1404                                 *(updates[i].hdr_static_metadata);
1405         }
1406
1407         if (update_type == UPDATE_TYPE_FULL &&
1408                         !core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1409                 BREAK_TO_DEBUGGER();
1410                 return;
1411         }
1412
1413         if (!surface_count)  /* reset */
1414                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1415
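        /*
         * Program every pipe driven by an updated surface: apply the context
         * for top pipes on medium/full updates, lock unblanked pipes, flip
         * addresses, and (except for fast updates) reprogram transfer
         * functions and info frames as needed.
         */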
1416         for (i = 0; i < surface_count; i++) {
1417                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1418
1419                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1420                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1421                         struct pipe_ctx *cur_pipe_ctx;
1422                         bool is_new_pipe_surface = true;
1423
1424                         if (pipe_ctx->surface != surface)
1425                                 continue;
1426
1427                         if (update_type >= UPDATE_TYPE_MED) {
1428                                 /* only apply for top pipe */
1429                                 if (!pipe_ctx->top_pipe) {
1430                                         core_dc->hwss.apply_ctx_for_surface(core_dc,
1431                                                          surface, context);
1432                                         context_timing_trace(core_dc, &context->res_ctx);
1433                                 }
1434                         }
1435
1436                         if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1437                                 core_dc->hwss.pipe_control_lock(
1438                                                 core_dc,
1439                                                 pipe_ctx,
1440                                                 true);
1441                         }
1442
1443                         if (updates[i].flip_addr)
1444                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1445
1446                         if (update_type == UPDATE_TYPE_FAST)
1447                                 continue;
1448
1449                         cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1450                         if (cur_pipe_ctx->surface == pipe_ctx->surface)
1451                                 is_new_pipe_surface = false;
1452
1453                         if (is_new_pipe_surface ||
1454                                         updates[i].in_transfer_func)
1455                                 core_dc->hwss.set_input_transfer_func(
1456                                                 pipe_ctx, pipe_ctx->surface);
1457
1458                         if (is_new_pipe_surface ||
1459                                         updates[i].out_transfer_func)
1460                                 core_dc->hwss.set_output_transfer_func(
1461                                                 pipe_ctx,
1462                                                 pipe_ctx->surface,
1463                                                 pipe_ctx->stream);
1464
1465                         if (updates[i].hdr_static_metadata) {
1466                                 resource_build_info_frame(pipe_ctx);
1467                                 core_dc->hwss.update_info_frame(pipe_ctx);
1468                         }
1469                 }
1470         }
1471
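        /* Release the pipe locks taken above, walking the pipes in reverse. */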
1472         for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1473                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1474
1475                 for (j = 0; j < surface_count; j++) {
1476                         if (updates[j].surface == &pipe_ctx->surface->public) {
1477                                 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1478                                         core_dc->hwss.pipe_control_lock(
1479                                                         core_dc,
1480                                                         pipe_ctx,
1481                                                         false);
1482                                 }
1483                                 break;
1484                         }
1485                 }
1486         }
1487
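        /*
         * Swap in the new context; the old one is destructed and its storage
         * kept as the temporary flip context.
         */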
1488         if (core_dc->current_context != context) {
1489                 resource_validate_ctx_destruct(core_dc->current_context);
1490                 core_dc->temp_flip_context = core_dc->current_context;
1491
1492                 core_dc->current_context = context;
1493         }
1494 }
1495
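/* Simple queries into the current context and the link table. */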
1496 uint8_t dc_get_current_stream_count(const struct dc *dc)
1497 {
1498         struct core_dc *core_dc = DC_TO_CORE(dc);
1499         return core_dc->current_context->stream_count;
1500 }
1501
1502 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1503 {
1504         struct core_dc *core_dc = DC_TO_CORE(dc);
1505         if (i < core_dc->current_context->stream_count)
1506                 return &(core_dc->current_context->streams[i]->public);
1507         return NULL;
1508 }
1509
1510 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1511 {
1512         struct core_dc *core_dc = DC_TO_CORE(dc);
1513         return &core_dc->links[link_index]->public;
1514 }
1515
1516 const struct graphics_object_id dc_get_link_id_at_index(
1517         struct dc *dc, uint32_t link_index)
1518 {
1519         struct core_dc *core_dc = DC_TO_CORE(dc);
1520         return core_dc->links[link_index]->link_id;
1521 }
1522
1523 const struct ddc_service *dc_get_ddc_at_index(
1524         struct dc *dc, uint32_t link_index)
1525 {
1526         struct core_dc *core_dc = DC_TO_CORE(dc);
1527         return core_dc->links[link_index]->ddc;
1528 }
1529
1530 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1531         struct dc *dc, uint32_t link_index)
1532 {
1533         struct core_dc *core_dc = DC_TO_CORE(dc);
1534         return core_dc->links[link_index]->public.irq_source_hpd;
1535 }
1536
1537 const struct audio **dc_get_audios(struct dc *dc)
1538 {
1539         struct core_dc *core_dc = DC_TO_CORE(dc);
1540         return (const struct audio **)core_dc->res_pool->audios;
1541 }
1542
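/*
 * Update each surface's address and flip behavior, then program the new
 * address on every pipe currently scanning out that surface.
 */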
1543 void dc_flip_surface_addrs(
1544                 struct dc *dc,
1545                 const struct dc_surface *const surfaces[],
1546                 struct dc_flip_addrs flip_addrs[],
1547                 uint32_t count)
1548 {
1549         struct core_dc *core_dc = DC_TO_CORE(dc);
1550         int i, j;
1551
1552         for (i = 0; i < count; i++) {
1553                 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1554
1555                 surface->public.address = flip_addrs[i].address;
1556                 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1557
1558                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1559                         struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1560
1561                         if (pipe_ctx->surface != surface)
1562                                 continue;
1563
1564                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1565                 }
1566         }
1567 }
1568
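/* Interrupt helpers: thin wrappers around the resource pool's irq service. */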
1569 enum dc_irq_source dc_interrupt_to_irq_source(
1570                 struct dc *dc,
1571                 uint32_t src_id,
1572                 uint32_t ext_id)
1573 {
1574         struct core_dc *core_dc = DC_TO_CORE(dc);
1575         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1576 }
1577
1578 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1579 {
1580         struct core_dc *core_dc = DC_TO_CORE(dc);
1581         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1582 }
1583
1584 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1585 {
1586         struct core_dc *core_dc = DC_TO_CORE(dc);
1587         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1588 }
1589
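/*
 * D0 re-initializes the hardware; any other power state powers the hardware
 * down and clears the current context so resume starts from a clean slate.
 */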
1590 void dc_set_power_state(
1591         struct dc *dc,
1592         enum dc_acpi_cm_power_state power_state)
1593 {
1594         struct core_dc *core_dc = DC_TO_CORE(dc);
1595
1596         switch (power_state) {
1597         case DC_ACPI_CM_POWER_STATE_D0:
1598                 core_dc->hwss.init_hw(core_dc);
1599                 break;
1600         default:
1601
1602                 core_dc->hwss.power_down(core_dc);
1603
1604                 /* Zero out the current context so that on resume we start with
1605                  * clean state, and dc hw programming optimizations will not
1606                  * cause any trouble.
1607                  */
1608                 memset(core_dc->current_context, 0,
1609                                 sizeof(*core_dc->current_context));
1610
1611                 core_dc->current_context->res_ctx.pool = core_dc->res_pool;
1612
1613                 break;
1614         }
1615
1616 }
1617
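/* Resume every link when coming back from a low power state. */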
1618 void dc_resume(const struct dc *dc)
1619 {
1620         struct core_dc *core_dc = DC_TO_CORE(dc);
1621
1622         uint32_t i;
1623
1624         for (i = 0; i < core_dc->link_count; i++)
1625                 core_link_resume(core_dc->links[i]);
1626 }
1627
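/*
 * Read 'size' bytes of DPCD starting at 'address' through the link's DDC
 * service; returns true on success.
 */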
1628 bool dc_read_dpcd(
1629                 struct dc *dc,
1630                 uint32_t link_index,
1631                 uint32_t address,
1632                 uint8_t *data,
1633                 uint32_t size)
1634 {
1635         struct core_dc *core_dc = DC_TO_CORE(dc);
1636
1637         struct core_link *link = core_dc->links[link_index];
1638         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1639                         link->ddc,
1640                         address,
1641                         data,
1642                         size);
1643         return r == DDC_RESULT_SUCESSFULL;
1644 }
1645
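/* Perform a combined I2C-over-DDC write/read transaction on the given link. */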
1646 bool dc_query_ddc_data(
1647                 struct dc *dc,
1648                 uint32_t link_index,
1649                 uint32_t address,
1650                 uint8_t *write_buf,
1651                 uint32_t write_size,
1652                 uint8_t *read_buf,
1653                 uint32_t read_size)
1654 {
1655         struct core_dc *core_dc = DC_TO_CORE(dc);
1656
1657         struct core_link *link = core_dc->links[link_index];
1658
1659         bool result = dal_ddc_service_query_ddc_data(
1660                         link->ddc,
1661                         address,
1662                         write_buf,
1663                         write_size,
1664                         read_buf,
1665                         read_size);
1666
1667         return result;
1668 }
1669
1670
1671 bool dc_write_dpcd(
1672                 struct dc *dc,
1673                 uint32_t link_index,
1674                 uint32_t address,
1675                 const uint8_t *data,
1676                 uint32_t size)
1677 {
1678         struct core_dc *core_dc = DC_TO_CORE(dc);
1679
1680         struct core_link *link = core_dc->links[link_index];
1681
1682         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1683                         link->ddc,
1684                         address,
1685                         data,
1686                         size);
1687         return r == DDC_RESULT_SUCESSFULL;
1688 }
1689
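/* Submit a raw i2c command on the link's DDC pin through i2caux. */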
1690 bool dc_submit_i2c(
1691                 struct dc *dc,
1692                 uint32_t link_index,
1693                 struct i2c_command *cmd)
1694 {
1695         struct core_dc *core_dc = DC_TO_CORE(dc);
1696
1697         struct core_link *link = core_dc->links[link_index];
1698         struct ddc_service *ddc = link->ddc;
1699
1700         return dal_i2caux_submit_i2c_command(
1701                 ddc->ctx->i2caux,
1702                 ddc->ddc_pin,
1703                 cmd);
1704 }
1705
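/*
 * Append a retained sink to the link's remote sink list; fails if the list
 * already holds MAX_SINKS_PER_LINK entries.
 */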
1706 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1707 {
1708         struct dc_link *dc_link = &core_link->public;
1709
1710         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1711                 BREAK_TO_DEBUGGER();
1712                 return false;
1713         }
1714
1715         dc_sink_retain(sink);
1716
1717         dc_link->remote_sinks[dc_link->sink_count] = sink;
1718         dc_link->sink_count++;
1719
1720         return true;
1721 }
1722
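/*
 * Create a sink from the caller-supplied EDID, register it as a remote sink
 * on the link and parse its EDID capabilities; on failure the sink is
 * unwound and NULL is returned.
 */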
1723 struct dc_sink *dc_link_add_remote_sink(
1724                 const struct dc_link *link,
1725                 const uint8_t *edid,
1726                 int len,
1727                 struct dc_sink_init_data *init_data)
1728 {
1729         struct dc_sink *dc_sink;
1730         enum dc_edid_status edid_status;
1731         struct core_link *core_link = DC_LINK_TO_LINK(link);
1732
1733         if (len > MAX_EDID_BUFFER_SIZE) {
1734                 dm_error("Max EDID buffer size breached!\n");
1735                 return NULL;
1736         }
1737
1738         if (!init_data) {
1739                 BREAK_TO_DEBUGGER();
1740                 return NULL;
1741         }
1742
1743         if (!init_data->link) {
1744                 BREAK_TO_DEBUGGER();
1745                 return NULL;
1746         }
1747
1748         dc_sink = dc_sink_create(init_data);
1749
1750         if (!dc_sink)
1751                 return NULL;
1752
1753         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1754         dc_sink->dc_edid.length = len;
1755
1756         if (!link_add_remote_sink_helper(
1757                         core_link,
1758                         dc_sink))
1759                 goto fail_add_sink;
1760
1761         edid_status = dm_helpers_parse_edid_caps(
1762                         core_link->ctx,
1763                         &dc_sink->dc_edid,
1764                         &dc_sink->edid_caps);
1765
1766         if (edid_status != EDID_OK)
1767                 goto fail;
1768
1769         return dc_sink;
1770 fail:
1771         dc_link_remove_remote_sink(link, dc_sink);
1772 fail_add_sink:
1773         dc_sink_release(dc_sink);
1774         return NULL;
1775 }
1776
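/* Set or clear the link's local sink and update the connection type. */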
1777 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1778 {
1779         struct core_link *core_link = DC_LINK_TO_LINK(link);
1780         struct dc_link *dc_link = &core_link->public;
1781
1782         dc_link->local_sink = sink;
1783
1784         if (sink == NULL) {
1785                 dc_link->type = dc_connection_none;
1786         } else {
1787                 dc_link->type = dc_connection_single;
1788         }
1789 }
1790
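/*
 * Release the given remote sink and compact the remote sink array so no
 * holes are left behind.
 */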
1791 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1792 {
1793         int i;
1794         struct core_link *core_link = DC_LINK_TO_LINK(link);
1795         struct dc_link *dc_link = &core_link->public;
1796
1797         if (!dc_link->sink_count) {
1798                 BREAK_TO_DEBUGGER();
1799                 return;
1800         }
1801
1802         for (i = 0; i < dc_link->sink_count; i++) {
1803                 if (dc_link->remote_sinks[i] == sink) {
1804                         dc_sink_release(sink);
1805                         dc_link->remote_sinks[i] = NULL;
1806
1807                         /* shrink array to remove empty place */
1808                         while (i < dc_link->sink_count - 1) {
1809                                 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1810                                 i++;
1811                         }
1812                         dc_link->remote_sinks[i] = NULL;
1813                         dc_link->sink_count--;
1814                         return;
1815                 }
1816         }
1817 }
1818