drm/amd/display: remove apply_clk_constraints, use validate_bandwidth universally
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / dc / core / dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "bandwidth_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (NULL != dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct core_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 link_init_params.connector_index = i;
101                 link_init_params.link_index = dc->link_count;
102                 link_init_params.dc = dc;
103                 link = link_create(&link_init_params);
104
105                 if (link) {
106                         dc->links[dc->link_count] = link;
107                         link->dc = dc;
108                         ++dc->link_count;
109                 } else {
110                         dm_error("DC: failed to create link!\n");
111                 }
112         }
113
114         for (i = 0; i < num_virtual_links; i++) {
115                 struct core_link *link = dm_alloc(sizeof(*link));
116                 struct encoder_init_data enc_init = {0};
117
118                 if (link == NULL) {
119                         BREAK_TO_DEBUGGER();
120                         goto failed_alloc;
121                 }
122
123                 link->ctx = dc->ctx;
124                 link->dc = dc;
125                 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128                 link->link_id.enum_id = ENUM_ID_1;
129                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
130
131                 enc_init.ctx = dc->ctx;
132                 enc_init.channel = CHANNEL_ID_UNKNOWN;
133                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135                 enc_init.connector = link->link_id;
136                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138                 enc_init.encoder.enum_id = ENUM_ID_1;
139                 virtual_link_encoder_construct(link->link_enc, &enc_init);
140
141                 link->public.link_index = dc->link_count;
142                 dc->links[dc->link_count] = link;
143                 dc->link_count++;
144         }
145
146         return true;
147
148 failed_alloc:
149         return false;
150 }
151
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153                 const struct dc_stream **stream, int num_streams,
154                 int vmin, int vmax)
155 {
156         /* TODO: Support multiple streams */
157         struct core_dc *core_dc = DC_TO_CORE(dc);
158         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
159         int i = 0;
160         bool ret = false;
161
162         for (i = 0; i < MAX_PIPES; i++) {
163                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
164
165                 if (pipe->stream == core_stream && pipe->stream_enc) {
166                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
167
168                         /* build and update the info frame */
169                         resource_build_info_frame(pipe);
170                         core_dc->hwss.update_info_frame(pipe);
171
172                         ret = true;
173                 }
174         }
175         return ret;
176 }
177
178
179 static bool set_gamut_remap(struct dc *dc,
180                         const struct dc_stream **stream, int num_streams)
181 {
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
184         int i = 0;
185         bool ret = false;
186         struct pipe_ctx *pipes;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
190                                 == core_stream) {
191
192                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
193                         core_dc->hwss.set_plane_config(core_dc, pipes,
194                                         &core_dc->current_context->res_ctx);
195                         ret = true;
196                 }
197         }
198
199         return ret;
200 }
201
202 /* This function is not expected to fail, proper implementation of
203  * validation will prevent this from ever being called for unsupported
204  * configurations.
205  */
206 static void stream_update_scaling(
207                 const struct dc *dc,
208                 const struct dc_stream *dc_stream,
209                 const struct rect *src,
210                 const struct rect *dst)
211 {
212         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
213         struct core_dc *core_dc = DC_TO_CORE(dc);
214         struct validate_context *cur_ctx = core_dc->current_context;
215         int i;
216
217         if (src)
218                 stream->public.src = *src;
219
220         if (dst)
221                 stream->public.dst = *dst;
222
223         for (i = 0; i < cur_ctx->stream_count; i++) {
224                 struct core_stream *cur_stream = cur_ctx->streams[i];
225
226                 if (stream == cur_stream) {
227                         struct dc_stream_status *status = &cur_ctx->stream_status[i];
228
229                         if (status->surface_count)
230                                 if (!dc_commit_surfaces_to_stream(
231                                                 &core_dc->public,
232                                                 status->surfaces,
233                                                 status->surface_count,
234                                                 &cur_stream->public))
235                                         /* Need to debug validation */
236                                         BREAK_TO_DEBUGGER();
237
238                         return;
239                 }
240         }
241 }
242
243 static bool set_psr_enable(struct dc *dc, bool enable)
244 {
245         struct core_dc *core_dc = DC_TO_CORE(dc);
246         int i;
247
248         for (i = 0; i < core_dc->link_count; i++)
249                 dc_link_set_psr_enable(&core_dc->links[i]->public,
250                                 enable);
251
252         return true;
253 }
254
255
256 static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
257 {
258         struct core_dc *core_dc = DC_TO_CORE(dc);
259         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
260         struct pipe_ctx *pipes;
261         int i;
262         unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
263
264         for (i = 0; i < core_dc->link_count; i++) {
265                 if (core_stream->sink->link == core_dc->links[i])
266                         dc_link_setup_psr(&core_dc->links[i]->public,
267                                         stream);
268         }
269
270         for (i = 0; i < MAX_PIPES; i++) {
271                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
272                                 == core_stream && i != underlay_idx) {
273                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
274                         core_dc->hwss.set_static_screen_control(&pipes, 1,
275                                         0x182);
276                 }
277         }
278
279         return true;
280 }
281
282 static void set_drive_settings(struct dc *dc,
283                 struct link_training_settings *lt_settings,
284                 const struct dc_link *link)
285 {
286         struct core_dc *core_dc = DC_TO_CORE(dc);
287         int i;
288
289         for (i = 0; i < core_dc->link_count; i++) {
290                 if (&core_dc->links[i]->public == link)
291                         break;
292         }
293
294         if (i >= core_dc->link_count)
295                 ASSERT_CRITICAL(false);
296
297         dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
298 }
299
300 static void perform_link_training(struct dc *dc,
301                 struct dc_link_settings *link_setting,
302                 bool skip_video_pattern)
303 {
304         struct core_dc *core_dc = DC_TO_CORE(dc);
305         int i;
306
307         for (i = 0; i < core_dc->link_count; i++)
308                 dc_link_dp_perform_link_training(
309                         &core_dc->links[i]->public,
310                         link_setting,
311                         skip_video_pattern);
312 }
313
314 static void set_preferred_link_settings(struct dc *dc,
315                 struct dc_link_settings *link_setting,
316                 const struct dc_link *link)
317 {
318         struct core_link *core_link = DC_LINK_TO_CORE(link);
319
320         core_link->public.verified_link_cap.lane_count =
321                                 link_setting->lane_count;
322         core_link->public.verified_link_cap.link_rate =
323                                 link_setting->link_rate;
324         dp_retrain_link_dp_test(core_link, link_setting, false);
325 }
326
/* Thin pass-through enabling hot-plug detection on @link. */
static void enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}
331
/* Thin pass-through disabling hot-plug detection on @link. */
static void disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}
336
337
338 static void set_test_pattern(
339                 const struct dc_link *link,
340                 enum dp_test_pattern test_pattern,
341                 const struct link_training_settings *p_link_settings,
342                 const unsigned char *p_custom_pattern,
343                 unsigned int cust_pattern_size)
344 {
345         if (link != NULL)
346                 dc_link_dp_set_test_pattern(
347                         link,
348                         test_pattern,
349                         p_link_settings,
350                         p_custom_pattern,
351                         cust_pattern_size);
352 }
353
354 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
355 {
356         core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
357         if (core_dc->hwss.set_drr != NULL) {
358                 core_dc->public.stream_funcs.adjust_vmin_vmax =
359                                 stream_adjust_vmin_vmax;
360         }
361
362         core_dc->public.stream_funcs.set_gamut_remap =
363                         set_gamut_remap;
364
365         core_dc->public.stream_funcs.set_psr_enable =
366                         set_psr_enable;
367
368         core_dc->public.stream_funcs.setup_psr =
369                         setup_psr;
370
371         core_dc->public.link_funcs.set_drive_settings =
372                         set_drive_settings;
373
374         core_dc->public.link_funcs.perform_link_training =
375                         perform_link_training;
376
377         core_dc->public.link_funcs.set_preferred_link_settings =
378                         set_preferred_link_settings;
379
380         core_dc->public.link_funcs.enable_hpd =
381                         enable_hpd;
382
383         core_dc->public.link_funcs.disable_hpd =
384                         disable_hpd;
385
386         core_dc->public.link_funcs.set_test_pattern =
387                         set_test_pattern;
388 }
389
390 static void destruct(struct core_dc *dc)
391 {
392         resource_validate_ctx_destruct(dc->current_context);
393
394         destroy_links(dc);
395
396         dc_destroy_resource_pool(dc);
397
398         if (dc->ctx->gpio_service)
399                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
400
401         if (dc->ctx->i2caux)
402                 dal_i2caux_destroy(&dc->ctx->i2caux);
403
404         if (dc->ctx->created_bios)
405                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
406
407         if (dc->ctx->logger)
408                 dal_logger_destroy(&dc->ctx->logger);
409
410         dm_free(dc->current_context);
411         dc->current_context = NULL;
412         dm_free(dc->temp_flip_context);
413         dc->temp_flip_context = NULL;
414         dm_free(dc->scratch_val_ctx);
415         dc->scratch_val_ctx = NULL;
416
417         dm_free(dc->ctx);
418         dc->ctx = NULL;
419 }
420
421 static bool construct(struct core_dc *dc,
422                 const struct dc_init_data *init_params)
423 {
424         struct dal_logger *logger;
425         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
426         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
427
428         if (!dc_ctx) {
429                 dm_error("%s: failed to create ctx\n", __func__);
430                 goto ctx_fail;
431         }
432
433         dc->current_context = dm_alloc(sizeof(*dc->current_context));
434         dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
435         dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));
436
437         if (!dc->current_context || !dc->temp_flip_context) {
438                 dm_error("%s: failed to create validate ctx\n", __func__);
439                 goto val_ctx_fail;
440         }
441
442         dc_ctx->cgs_device = init_params->cgs_device;
443         dc_ctx->driver_context = init_params->driver;
444         dc_ctx->dc = &dc->public;
445         dc_ctx->asic_id = init_params->asic_id;
446
447         /* Create logger */
448         logger = dal_logger_create(dc_ctx);
449
450         if (!logger) {
451                 /* can *not* call logger. call base driver 'print error' */
452                 dm_error("%s: failed to create Logger!\n", __func__);
453                 goto logger_fail;
454         }
455         dc_ctx->logger = logger;
456         dc->ctx = dc_ctx;
457         dc->ctx->dce_environment = init_params->dce_environment;
458
459         dc_version = resource_parse_asic_id(init_params->asic_id);
460         dc->ctx->dce_version = dc_version;
461
462         /* Resource should construct all asic specific resources.
463          * This should be the only place where we need to parse the asic id
464          */
465         if (init_params->vbios_override)
466                 dc_ctx->dc_bios = init_params->vbios_override;
467         else {
468                 /* Create BIOS parser */
469                 struct bp_init_data bp_init_data;
470                 bp_init_data.ctx = dc_ctx;
471                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
472
473                 dc_ctx->dc_bios = dal_bios_parser_create(
474                                 &bp_init_data, dc_version);
475
476                 if (!dc_ctx->dc_bios) {
477                         ASSERT_CRITICAL(false);
478                         goto bios_fail;
479                 }
480
481                 dc_ctx->created_bios = true;
482         }
483
484         /* Create I2C AUX */
485         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
486
487         if (!dc_ctx->i2caux) {
488                 ASSERT_CRITICAL(false);
489                 goto failed_to_create_i2caux;
490         }
491
492         /* Create GPIO service */
493         dc_ctx->gpio_service = dal_gpio_service_create(
494                         dc_version,
495                         dc_ctx->dce_environment,
496                         dc_ctx);
497
498         if (!dc_ctx->gpio_service) {
499                 ASSERT_CRITICAL(false);
500                 goto gpio_fail;
501         }
502
503         dc->res_pool = dc_create_resource_pool(
504                         dc,
505                         init_params->num_virtual_links,
506                         dc_version,
507                         init_params->asic_id);
508         if (!dc->res_pool)
509                 goto create_resource_fail;
510
511         if (!create_links(dc, init_params->num_virtual_links))
512                 goto create_links_fail;
513
514         allocate_dc_stream_funcs(dc);
515
516         return true;
517
518         /**** error handling here ****/
519 create_links_fail:
520 create_resource_fail:
521 gpio_fail:
522 failed_to_create_i2caux:
523 bios_fail:
524 logger_fail:
525 val_ctx_fail:
526 ctx_fail:
527         destruct(dc);
528         return false;
529 }
530
531 /*
532 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
533 {
534         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
535         unsigned int pixDurationInPico = round(pixel_duration);
536
537         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
538
539         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
540         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
541         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
542
543         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
544         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
545         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
546
547         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
548         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
549
550         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
551         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
552 }
553 */
554
555 /*******************************************************************************
556  * Public functions
557  ******************************************************************************/
558
559 struct dc *dc_create(const struct dc_init_data *init_params)
560  {
561         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
562         unsigned int full_pipe_count;
563
564         if (NULL == core_dc)
565                 goto alloc_fail;
566
567         if (false == construct(core_dc, init_params))
568                 goto construct_fail;
569
570         /*TODO: separate HW and SW initialization*/
571         core_dc->hwss.init_hw(core_dc);
572
573         full_pipe_count = core_dc->res_pool->pipe_count;
574         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
575                 full_pipe_count--;
576         core_dc->public.caps.max_streams = min(
577                         full_pipe_count,
578                         core_dc->res_pool->stream_enc_count);
579
580         core_dc->public.caps.max_links = core_dc->link_count;
581         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
582
583         core_dc->public.config = init_params->flags;
584
585         dm_logger_write(core_dc->ctx->logger, LOG_DC,
586                         "Display Core initialized\n");
587
588
589         /* TODO: missing feature to be enabled */
590         core_dc->public.debug.disable_dfs_bypass = true;
591
592         return &core_dc->public;
593
594 construct_fail:
595         dm_free(core_dc);
596
597 alloc_fail:
598         return NULL;
599 }
600
/*
 * Destroy a dc instance created by dc_create() and NULL the caller's
 * pointer so it cannot be used after free.
 */
void dc_destroy(struct dc **dc)
{
	struct core_dc *core_dc = DC_TO_CORE(*dc);
	destruct(core_dc);
	dm_free(core_dc);
	*dc = NULL;
}
608
/*
 * Decide whether the proposed validation set differs from the current
 * context enough to require full revalidation. Returns true (validate)
 * on any stream-count, surface-count, stream-timing or surface change;
 * changes to a surface's clip_rect and dst_rect position alone are
 * tolerated without revalidation.
 */
static bool is_validation_required(
		const struct core_dc *dc,
		const struct dc_validation_set set[],
		int set_count)
{
	const struct validate_context *context = dc->current_context;
	int i, j;

	if (context->stream_count != set_count)
		return true;

	for (i = 0; i < set_count; i++) {

		if (set[i].surface_count != context->stream_status[i].surface_count)
			return true;
		if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
			return true;

		for (j = 0; j < set[i].surface_count; j++) {
			/* Copy the current surface, then overwrite the fields
			 * we deliberately ignore with the new values so the
			 * memcmp below only flags other differences.
			 * NOTE(review): memcmp on a struct copy compares
			 * padding bytes too — relies on both sides being
			 * copied the same way; confirm if changing. */
			struct dc_surface temp_surf = { 0 };

			temp_surf = *context->stream_status[i].surfaces[j];
			temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
			temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
			temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;

			if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
				return true;
		}
	}

	return false;
}
642
643 bool dc_validate_resources(
644                 const struct dc *dc,
645                 const struct dc_validation_set set[],
646                 uint8_t set_count)
647 {
648         struct core_dc *core_dc = DC_TO_CORE(dc);
649         enum dc_status result = DC_ERROR_UNEXPECTED;
650         struct validate_context *context;
651
652         if (!is_validation_required(core_dc, set, set_count))
653                 return true;
654
655         context = dm_alloc(sizeof(struct validate_context));
656         if(context == NULL)
657                 goto context_alloc_fail;
658
659         result = core_dc->res_pool->funcs->validate_with_context(
660                                                 core_dc, set, set_count, context);
661
662         resource_validate_ctx_destruct(context);
663         dm_free(context);
664
665 context_alloc_fail:
666         if (result != DC_OK) {
667                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
668                                 "%s:resource validation failed, dc_status:%d\n",
669                                 __func__,
670                                 result);
671         }
672
673         return (result == DC_OK);
674
675 }
676
677 bool dc_validate_guaranteed(
678                 const struct dc *dc,
679                 const struct dc_stream *stream)
680 {
681         struct core_dc *core_dc = DC_TO_CORE(dc);
682         enum dc_status result = DC_ERROR_UNEXPECTED;
683         struct validate_context *context;
684
685         context = dm_alloc(sizeof(struct validate_context));
686         if (context == NULL)
687                 goto context_alloc_fail;
688
689         result = core_dc->res_pool->funcs->validate_guaranteed(
690                                         core_dc, stream, context);
691
692         resource_validate_ctx_destruct(context);
693         dm_free(context);
694
695 context_alloc_fail:
696         if (result != DC_OK) {
697                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
698                         "%s:guaranteed validation failed, dc_status:%d\n",
699                         __func__,
700                         result);
701                 }
702
703         return (result == DC_OK);
704 }
705
/*
 * Group top pipes with synchronizable stream timings and enable
 * hardware timing synchronization per group. A pipe that is already
 * unblanked acts as the group's master (slot 0); other unblanked
 * pipes are dropped from the group since they are already in sync.
 */
static void program_timing_sync(
		struct core_dc *core_dc,
		struct validate_context *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = ctx->res_ctx.pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* Collect candidate pipes: active top pipes only (bottom pipes
	 * of a blend chain follow their top pipe's timing). */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced
		 * NOTE: j deliberately continues from the loop above, i.e.
		 * scanning starts just past the chosen master. Removal swaps
		 * in the last element and re-checks the same slot (j--). */
		for (j = j + 1; j < group_size; j++) {
			if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		/* Only groups of two or more need hardware synchronization. */
		if (group_size > 1) {
			core_dc->hwss.enable_timing_synchronization(
				core_dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}
779
780 static bool streams_changed(
781                 struct core_dc *dc,
782                 const struct dc_stream *streams[],
783                 uint8_t stream_count)
784 {
785         uint8_t i;
786
787         if (stream_count != dc->current_context->stream_count)
788                 return true;
789
790         for (i = 0; i < dc->current_context->stream_count; i++) {
791                 if (&dc->current_context->streams[i]->public != streams[i])
792                         return true;
793         }
794
795         return false;
796 }
797
798 static void fill_display_configs(
799         const struct validate_context *context,
800         struct dm_pp_display_configuration *pp_display_cfg)
801 {
802         int j;
803         int num_cfgs = 0;
804
805         for (j = 0; j < context->stream_count; j++) {
806                 int k;
807
808                 const struct core_stream *stream = context->streams[j];
809                 struct dm_pp_single_disp_config *cfg =
810                         &pp_display_cfg->disp_configs[num_cfgs];
811                 const struct pipe_ctx *pipe_ctx = NULL;
812
813                 for (k = 0; k < MAX_PIPES; k++)
814                         if (stream == context->res_ctx.pipe_ctx[k].stream) {
815                                 pipe_ctx = &context->res_ctx.pipe_ctx[k];
816                                 break;
817                         }
818
819                 ASSERT(pipe_ctx != NULL);
820
821                 num_cfgs++;
822                 cfg->signal = pipe_ctx->stream->signal;
823                 cfg->pipe_idx = pipe_ctx->pipe_idx;
824                 cfg->src_height = stream->public.src.height;
825                 cfg->src_width = stream->public.src.width;
826                 cfg->ddi_channel_mapping =
827                         stream->sink->link->ddi_channel_mapping.raw;
828                 cfg->transmitter =
829                         stream->sink->link->link_enc->transmitter;
830                 cfg->link_settings.lane_count =
831                         stream->sink->link->public.cur_link_settings.lane_count;
832                 cfg->link_settings.link_rate =
833                         stream->sink->link->public.cur_link_settings.link_rate;
834                 cfg->link_settings.link_spread =
835                         stream->sink->link->public.cur_link_settings.link_spread;
836                 cfg->sym_clock = stream->phy_pix_clk;
837                 /* Round v_refresh*/
838                 cfg->v_refresh = stream->public.timing.pix_clk_khz * 1000;
839                 cfg->v_refresh /= stream->public.timing.h_total;
840                 cfg->v_refresh = (cfg->v_refresh + stream->public.timing.v_total / 2)
841                                                         / stream->public.timing.v_total;
842         }
843
844         pp_display_cfg->display_count = num_cfgs;
845 }
846
847 static uint32_t get_min_vblank_time_us(const struct validate_context *context)
848 {
849         uint8_t j;
850         uint32_t min_vertical_blank_time = -1;
851
852                 for (j = 0; j < context->stream_count; j++) {
853                         const struct dc_stream *stream = &context->streams[j]->public;
854                         uint32_t vertical_blank_in_pixels = 0;
855                         uint32_t vertical_blank_time = 0;
856
857                         vertical_blank_in_pixels = stream->timing.h_total *
858                                 (stream->timing.v_total
859                                         - stream->timing.v_addressable);
860
861                         vertical_blank_time = vertical_blank_in_pixels
862                                 * 1000 / stream->timing.pix_clk_khz;
863
864                         if (min_vertical_blank_time > vertical_blank_time)
865                                 min_vertical_blank_time = vertical_blank_time;
866                 }
867
868         return min_vertical_blank_time;
869 }
870
871 static int determine_sclk_from_bounding_box(
872                 const struct core_dc *dc,
873                 int required_sclk)
874 {
875         int i;
876
877         /*
878          * Some asics do not give us sclk levels, so we just report the actual
879          * required sclk
880          */
881         if (dc->sclk_lvls.num_levels == 0)
882                 return required_sclk;
883
884         for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
885                 if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
886                         return dc->sclk_lvls.clocks_in_khz[i];
887         }
888         /*
889          * even maximum level could not satisfy requirement, this
890          * is unexpected at this stage, should have been caught at
891          * validation time
892          */
893         ASSERT(0);
894         return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
895 }
896
897 void pplib_apply_display_requirements(
898         struct core_dc *dc,
899         const struct validate_context *context,
900         struct dm_pp_display_configuration *pp_display_cfg)
901 {
902         pp_display_cfg->all_displays_in_sync =
903                 context->bw_results.all_displays_in_sync;
904         pp_display_cfg->nb_pstate_switch_disable =
905                         context->bw_results.nbp_state_change_enable == false;
906         pp_display_cfg->cpu_cc6_disable =
907                         context->bw_results.cpuc_state_change_enable == false;
908         pp_display_cfg->cpu_pstate_disable =
909                         context->bw_results.cpup_state_change_enable == false;
910         pp_display_cfg->cpu_pstate_separation_time =
911                         context->bw_results.blackout_recovery_time_us;
912
913         pp_display_cfg->min_memory_clock_khz = context->bw_results.required_yclk
914                 / MEMORY_TYPE_MULTIPLIER;
915
916         pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
917                         dc,
918                         context->bw_results.required_sclk);
919
920         pp_display_cfg->min_engine_clock_deep_sleep_khz
921                         = context->bw_results.required_sclk_deep_sleep;
922
923         pp_display_cfg->avail_mclk_switch_time_us =
924                                                 get_min_vblank_time_us(context);
925         /* TODO: dce11.2*/
926         pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
927
928         pp_display_cfg->disp_clk_khz = context->dispclk_khz;
929
930         fill_display_configs(context, pp_display_cfg);
931
932         /* TODO: is this still applicable?*/
933         if (pp_display_cfg->display_count == 1) {
934                 const struct dc_crtc_timing *timing =
935                         &context->streams[0]->public.timing;
936
937                 pp_display_cfg->crtc_index =
938                         pp_display_cfg->disp_configs[0].pipe_idx;
939                 pp_display_cfg->line_time_in_us = timing->h_total * 1000
940                                                         / timing->pix_clk_khz;
941         }
942
943         if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
944                         struct dm_pp_display_configuration)) !=  0)
945                 dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
946
947         dc->prev_display_config = *pp_display_cfg;
948
949 }
950
951 bool dc_commit_streams(
952         struct dc *dc,
953         const struct dc_stream *streams[],
954         uint8_t stream_count)
955 {
956         struct core_dc *core_dc = DC_TO_CORE(dc);
957         struct dc_bios *dcb = core_dc->ctx->dc_bios;
958         enum dc_status result = DC_ERROR_UNEXPECTED;
959         struct validate_context *context;
960         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
961         int i, j, k;
962
963         if (false == streams_changed(core_dc, streams, stream_count))
964                 return DC_OK;
965
966         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
967                                 __func__, stream_count);
968
969         for (i = 0; i < stream_count; i++) {
970                 const struct dc_stream *stream = streams[i];
971                 const struct dc_stream_status *status = dc_stream_get_status(stream);
972                 int j;
973
974                 dc_stream_log(stream,
975                                 core_dc->ctx->logger,
976                                 LOG_DC);
977
978                 set[i].stream = stream;
979
980                 if (status) {
981                         set[i].surface_count = status->surface_count;
982                         for (j = 0; j < status->surface_count; j++)
983                                 set[i].surfaces[j] = status->surfaces[j];
984                 }
985
986         }
987
988         context = dm_alloc(sizeof(struct validate_context));
989         if (context == NULL)
990                 goto context_alloc_fail;
991
992         result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
993         if (result != DC_OK){
994                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
995                                         "%s: Context validation failed! dc_status:%d\n",
996                                         __func__,
997                                         result);
998                 BREAK_TO_DEBUGGER();
999                 resource_validate_ctx_destruct(context);
1000                 goto fail;
1001         }
1002
1003         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1004                 core_dc->hwss.enable_accelerated_mode(core_dc);
1005         }
1006
1007         if (result == DC_OK) {
1008                 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
1009         }
1010
1011         program_timing_sync(core_dc, context);
1012
1013         for (i = 0; i < context->stream_count; i++) {
1014                 const struct core_sink *sink = context->streams[i]->sink;
1015
1016                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
1017                         const struct dc_surface *dc_surface =
1018                                         context->stream_status[i].surfaces[j];
1019
1020                         for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
1021                                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
1022
1023                                 if (dc_surface != &pipe->surface->public
1024                                                 || !dc_surface->visible)
1025                                         continue;
1026
1027                                 pipe->tg->funcs->set_blank(pipe->tg, false);
1028                         }
1029                 }
1030
1031                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
1032                                 context->streams[i]->public.timing.h_addressable,
1033                                 context->streams[i]->public.timing.v_addressable,
1034                                 context->streams[i]->public.timing.h_total,
1035                                 context->streams[i]->public.timing.v_total,
1036                                 context->streams[i]->public.timing.pix_clk_khz);
1037         }
1038
1039         pplib_apply_display_requirements(core_dc,
1040                         context, &context->pp_display_cfg);
1041
1042         resource_validate_ctx_destruct(core_dc->current_context);
1043
1044         if (core_dc->temp_flip_context != core_dc->current_context) {
1045                 dm_free(core_dc->temp_flip_context);
1046                 core_dc->temp_flip_context = core_dc->current_context;
1047         }
1048         core_dc->current_context = context;
1049         memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));
1050
1051         return (result == DC_OK);
1052
1053 fail:
1054         dm_free(context);
1055
1056 context_alloc_fail:
1057         return (result == DC_OK);
1058 }
1059
1060 bool dc_pre_update_surfaces_to_stream(
1061                 struct dc *dc,
1062                 const struct dc_surface *const *new_surfaces,
1063                 uint8_t new_surface_count,
1064                 const struct dc_stream *dc_stream)
1065 {
1066         int i, j;
1067         struct core_dc *core_dc = DC_TO_CORE(dc);
1068         int prev_disp_clk = core_dc->current_context->dispclk_khz;
1069         struct dc_stream_status *stream_status = NULL;
1070         struct validate_context *context;
1071         bool ret = true;
1072
1073         pre_surface_trace(dc, new_surfaces, new_surface_count);
1074
1075         if (core_dc->current_context->stream_count == 0)
1076                 return false;
1077
1078         /* Cannot commit surface to a stream that is not commited */
1079         for (i = 0; i < core_dc->current_context->stream_count; i++)
1080                 if (dc_stream == &core_dc->current_context->streams[i]->public)
1081                         break;
1082
1083         if (i == core_dc->current_context->stream_count)
1084                 return false;
1085
1086         stream_status = &core_dc->current_context->stream_status[i];
1087
1088         if (new_surface_count == stream_status->surface_count) {
1089                 bool skip_pre = true;
1090
1091                 for (i = 0; i < stream_status->surface_count; i++) {
1092                         struct dc_surface temp_surf = { 0 };
1093
1094                         temp_surf = *stream_status->surfaces[i];
1095                         temp_surf.clip_rect = new_surfaces[i]->clip_rect;
1096                         temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
1097                         temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
1098
1099                         if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
1100                                 skip_pre = false;
1101                                 break;
1102                         }
1103                 }
1104
1105                 if (skip_pre)
1106                         return true;
1107         }
1108
1109         context = dm_alloc(sizeof(struct validate_context));
1110
1111         if (!context) {
1112                 dm_error("%s: failed to create validate ctx\n", __func__);
1113                 ret = false;
1114                 goto val_ctx_fail;
1115         }
1116
1117         resource_validate_ctx_copy_construct(core_dc->current_context, context);
1118
1119         dm_logger_write(core_dc->ctx->logger, LOG_DC,
1120                                 "%s: commit %d surfaces to stream 0x%x\n",
1121                                 __func__,
1122                                 new_surface_count,
1123                                 dc_stream);
1124
1125         if (!resource_attach_surfaces_to_context(
1126                         new_surfaces, new_surface_count, dc_stream, context)) {
1127                 BREAK_TO_DEBUGGER();
1128                 ret = false;
1129                 goto unexpected_fail;
1130         }
1131
1132         for (i = 0; i < new_surface_count; i++)
1133                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1134                         if (context->res_ctx.pipe_ctx[j].surface !=
1135                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
1136                                 continue;
1137
1138                         resource_build_scaling_params(
1139                                 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
1140
1141                         if (dc->debug.surface_visual_confirm) {
1142                                 context->res_ctx.pipe_ctx[j].scl_data.recout.height -= 2;
1143                                 context->res_ctx.pipe_ctx[j].scl_data.recout.width -= 2;
1144                         }
1145                 }
1146
1147         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1148                 BREAK_TO_DEBUGGER();
1149                 ret = false;
1150                 goto unexpected_fail;
1151         }
1152
1153         if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)
1154                         && prev_disp_clk < context->dispclk_khz) {
1155                 pplib_apply_display_requirements(core_dc, context,
1156                                                 &context->pp_display_cfg);
1157                 context->res_ctx.pool->display_clock->funcs->set_clock(
1158                                 context->res_ctx.pool->display_clock,
1159                                 context->dispclk_khz * 115 / 100);
1160                 core_dc->current_context->bw_results.dispclk_khz = context->dispclk_khz;
1161                 core_dc->current_context->dispclk_khz = context->dispclk_khz;
1162         }
1163
1164         for (i = 0; i < new_surface_count; i++)
1165                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1166                         if (context->res_ctx.pipe_ctx[j].surface !=
1167                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
1168                                 continue;
1169
1170                         core_dc->hwss.prepare_pipe_for_context(
1171                                         core_dc,
1172                                         &context->res_ctx.pipe_ctx[j],
1173                                         context);
1174                 }
1175
1176 unexpected_fail:
1177         resource_validate_ctx_destruct(context);
1178         dm_free(context);
1179 val_ctx_fail:
1180
1181         return ret;
1182 }
1183
1184 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1185 {
1186         int i;
1187         struct core_dc *core_dc = DC_TO_CORE(dc);
1188         struct validate_context *context = dm_alloc(sizeof(struct validate_context));
1189
1190         if (!context) {
1191                 dm_error("%s: failed to create validate ctx\n", __func__);
1192                 return false;
1193         }
1194         resource_validate_ctx_copy_construct(core_dc->current_context, context);
1195
1196         post_surface_trace(dc);
1197
1198         for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
1199                 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1200                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
1201                         core_dc->hwss.power_down_front_end(
1202                                         core_dc, &context->res_ctx.pipe_ctx[i]);
1203                 }
1204         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1205                 BREAK_TO_DEBUGGER();
1206                 return false;
1207         }
1208
1209         core_dc->hwss.set_bandwidth(core_dc);
1210
1211         /*TODO: dce specific*/
1212         pplib_apply_display_requirements(core_dc, context, &context->pp_display_cfg);
1213
1214         resource_validate_ctx_destruct(core_dc->current_context);
1215         core_dc->current_context = context;
1216
1217         return true;
1218 }
1219
1220 bool dc_commit_surfaces_to_stream(
1221                 struct dc *dc,
1222                 const struct dc_surface **new_surfaces,
1223                 uint8_t new_surface_count,
1224                 const struct dc_stream *dc_stream)
1225 {
1226         struct dc_surface_update updates[MAX_SURFACES];
1227         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1228         struct dc_plane_info plane_info[MAX_SURFACES];
1229         struct dc_scaling_info scaling_info[MAX_SURFACES];
1230         int i;
1231
1232         if (!dc_pre_update_surfaces_to_stream(
1233                         dc, new_surfaces, new_surface_count, dc_stream))
1234                 return false;
1235
1236         memset(updates, 0, sizeof(updates));
1237         memset(flip_addr, 0, sizeof(flip_addr));
1238         memset(plane_info, 0, sizeof(plane_info));
1239         memset(scaling_info, 0, sizeof(scaling_info));
1240
1241         for (i = 0; i < new_surface_count; i++) {
1242                 updates[i].surface = new_surfaces[i];
1243                 updates[i].gamma =
1244                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1245                 flip_addr[i].address = new_surfaces[i]->address;
1246                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1247                 plane_info[i].color_space = new_surfaces[i]->color_space;
1248                 plane_info[i].format = new_surfaces[i]->format;
1249                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1250                 plane_info[i].rotation = new_surfaces[i]->rotation;
1251                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1252                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1253                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1254                 plane_info[i].visible = new_surfaces[i]->visible;
1255                 plane_info[i].dcc = new_surfaces[i]->dcc;
1256                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1257                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1258                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1259                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1260
1261                 updates[i].flip_addr = &flip_addr[i];
1262                 updates[i].plane_info = &plane_info[i];
1263                 updates[i].scaling_info = &scaling_info[i];
1264         }
1265         dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1266
1267         return dc_post_update_surfaces_to_stream(dc);
1268 }
1269
1270 static bool is_surface_in_context(
1271                 const struct validate_context *context,
1272                 const struct dc_surface *surface)
1273 {
1274         int j;
1275
1276         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1277                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1278
1279                 if (surface == &pipe_ctx->surface->public) {
1280                         return true;
1281                 }
1282         }
1283
1284         return false;
1285 }
1286
/*
 * Classification of how invasive a surface update is; later values require
 * progressively more programming (and possibly resource reallocation).
 */
enum surface_update_type {
	UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
	UPDATE_TYPE_MED,  /* a lot of programming needed.  may need to alloc */
	UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
1292
1293 static enum surface_update_type det_surface_update(
1294                 const struct core_dc *dc,
1295                 const struct dc_surface_update *u)
1296 {
1297         const struct validate_context *context = dc->current_context;
1298
1299         if (u->scaling_info || u->plane_info)
1300                 /* todo: not all scale and plane_info update need full update
1301                  * ie. check if following is the same
1302                  * scale ratio, view port, surface bpp etc
1303                  */
1304                 return UPDATE_TYPE_FULL; /* may need bandwidth update */
1305
1306         if (!is_surface_in_context(context, u->surface))
1307                 return UPDATE_TYPE_FULL;
1308
1309         if (u->in_transfer_func ||
1310                 u->out_transfer_func ||
1311                 u->hdr_static_metadata)
1312                 return UPDATE_TYPE_MED;
1313
1314         return UPDATE_TYPE_FAST;
1315 }
1316
1317 static enum surface_update_type check_update_surfaces_for_stream(
1318                 struct core_dc *dc,
1319                 struct dc_surface_update *updates,
1320                 int surface_count,
1321                 const struct dc_stream_status *stream_status)
1322 {
1323         int i;
1324         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1325
1326         if (stream_status->surface_count != surface_count)
1327                 return UPDATE_TYPE_FULL;
1328
1329         for (i = 0 ; i < surface_count; i++) {
1330                 enum surface_update_type type =
1331                                 det_surface_update(dc, &updates[i]);
1332
1333                 if (type == UPDATE_TYPE_FULL)
1334                         return type;
1335
1336                 if (overall_type < type)
1337                         overall_type = type;
1338         }
1339
1340         return overall_type;
1341 }
1342
/* Minimum update type that triggers update_surface_trace(); by default only
 * full updates are traced.
 */
enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1344
1345 void dc_update_surfaces_for_stream(struct dc *dc,
1346                 struct dc_surface_update *updates, int surface_count,
1347                 const struct dc_stream *dc_stream)
1348 {
1349         struct core_dc *core_dc = DC_TO_CORE(dc);
1350         struct validate_context *context;
1351         int i, j;
1352
1353         enum surface_update_type update_type;
1354         const struct dc_stream_status *stream_status;
1355
1356         stream_status = dc_stream_get_status(dc_stream);
1357         ASSERT(stream_status);
1358         if (!stream_status)
1359                 return; /* Cannot commit surface to stream that is not committed */
1360
1361         update_type = check_update_surfaces_for_stream(
1362                         core_dc, updates, surface_count, stream_status);
1363
1364         if (update_type >= update_surface_trace_level)
1365                 update_surface_trace(dc, updates, surface_count);
1366
1367         if (update_type >= UPDATE_TYPE_FULL) {
1368                 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1369
1370                 for (i = 0; i < surface_count; i++)
1371                         new_surfaces[i] = updates[i].surface;
1372
1373                 /* initialize scratch memory for building context */
1374                 context = core_dc->temp_flip_context;
1375                 resource_validate_ctx_copy_construct(
1376                                 core_dc->current_context, context);
1377
1378                 /* add surface to context */
1379                 if (!resource_attach_surfaces_to_context(
1380                                 new_surfaces, surface_count, dc_stream, context)) {
1381                         BREAK_TO_DEBUGGER();
1382                         return;
1383                 }
1384         } else {
1385                 context = core_dc->current_context;
1386         }
1387         for (i = 0; i < surface_count; i++) {
1388                 /* save update param into surface */
1389                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1390                 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1391
1392                 if (updates[i].flip_addr) {
1393                         surface->public.address = updates[i].flip_addr->address;
1394                         surface->public.flip_immediate =
1395                                         updates[i].flip_addr->flip_immediate;
1396                 }
1397
1398                 if (updates[i].scaling_info) {
1399                         surface->public.scaling_quality =
1400                                         updates[i].scaling_info->scaling_quality;
1401                         surface->public.dst_rect =
1402                                         updates[i].scaling_info->dst_rect;
1403                         surface->public.src_rect =
1404                                         updates[i].scaling_info->src_rect;
1405                         surface->public.clip_rect =
1406                                         updates[i].scaling_info->clip_rect;
1407                 }
1408
1409                 if (updates[i].plane_info) {
1410                         surface->public.color_space =
1411                                         updates[i].plane_info->color_space;
1412                         surface->public.format =
1413                                         updates[i].plane_info->format;
1414                         surface->public.plane_size =
1415                                         updates[i].plane_info->plane_size;
1416                         surface->public.rotation =
1417                                         updates[i].plane_info->rotation;
1418                         surface->public.horizontal_mirror =
1419                                         updates[i].plane_info->horizontal_mirror;
1420                         surface->public.stereo_format =
1421                                         updates[i].plane_info->stereo_format;
1422                         surface->public.tiling_info =
1423                                         updates[i].plane_info->tiling_info;
1424                         surface->public.visible =
1425                                         updates[i].plane_info->visible;
1426                         surface->public.dcc =
1427                                         updates[i].plane_info->dcc;
1428                 }
1429
1430                 /* not sure if we still need this */
1431                 if (update_type == UPDATE_TYPE_FULL) {
1432                         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1433                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1434
1435                                 if (pipe_ctx->surface != surface)
1436                                         continue;
1437
1438                                 resource_build_scaling_params(updates[i].surface, pipe_ctx);
1439                                 if (dc->debug.surface_visual_confirm) {
1440                                         pipe_ctx->scl_data.recout.height -= 2;
1441                                         pipe_ctx->scl_data.recout.width -= 2;
1442                                 }
1443                         }
1444                 }
1445
1446                 if (updates[i].gamma &&
1447                         updates[i].gamma != surface->public.gamma_correction) {
1448                         if (surface->public.gamma_correction != NULL)
1449                                 dc_gamma_release(&surface->public.
1450                                                 gamma_correction);
1451
1452                         dc_gamma_retain(updates[i].gamma);
1453                         surface->public.gamma_correction =
1454                                                 updates[i].gamma;
1455                 }
1456
1457                 if (updates[i].in_transfer_func &&
1458                         updates[i].in_transfer_func != surface->public.in_transfer_func) {
1459                         if (surface->public.in_transfer_func != NULL)
1460                                 dc_transfer_func_release(
1461                                                 surface->public.
1462                                                 in_transfer_func);
1463
1464                         dc_transfer_func_retain(
1465                                         updates[i].in_transfer_func);
1466                         surface->public.in_transfer_func =
1467                                         updates[i].in_transfer_func;
1468                 }
1469
1470                 if (updates[i].out_transfer_func &&
1471                         updates[i].out_transfer_func != dc_stream->out_transfer_func) {
1472                         if (dc_stream->out_transfer_func != NULL)
1473                                 dc_transfer_func_release(dc_stream->out_transfer_func);
1474                         dc_transfer_func_retain(updates[i].out_transfer_func);
1475                         stream->public.out_transfer_func = updates[i].out_transfer_func;
1476                 }
1477                 if (updates[i].hdr_static_metadata)
1478                         surface->public.hdr_static_ctx =
1479                                 *(updates[i].hdr_static_metadata);
1480         }
1481
1482         if (update_type == UPDATE_TYPE_FULL &&
1483                         !core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1484                 BREAK_TO_DEBUGGER();
1485                 return;
1486         }
1487
1488         if (!surface_count)  /* reset */
1489                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1490
1491         for (i = 0; i < surface_count; i++) {
1492                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1493
1494                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1495                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1496                         struct pipe_ctx *cur_pipe_ctx;
1497                         bool is_new_pipe_surface = true;
1498
1499                         if (pipe_ctx->surface != surface)
1500                                 continue;
1501
1502                         if (update_type != UPDATE_TYPE_FAST &&
1503                                 !pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1504                                 core_dc->hwss.pipe_control_lock(
1505                                                 core_dc->hwseq,
1506                                                 pipe_ctx->pipe_idx,
1507                                                 PIPE_LOCK_CONTROL_GRAPHICS |
1508                                                 PIPE_LOCK_CONTROL_SCL |
1509                                                 PIPE_LOCK_CONTROL_BLENDER |
1510                                                 PIPE_LOCK_CONTROL_MODE,
1511                                                 true);
1512                         }
1513
1514                         if (update_type == UPDATE_TYPE_FULL) {
1515                                 /* only apply for top pipe */
1516                                 if (!pipe_ctx->top_pipe) {
1517                                         core_dc->hwss.apply_ctx_for_surface(core_dc,
1518                                                          surface, context);
1519                                         context_timing_trace(dc, &context->res_ctx);
1520                                 }
1521                         }
1522
1523                         if (updates[i].flip_addr)
1524                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1525
1526                         if (update_type == UPDATE_TYPE_FAST)
1527                                 continue;
1528
1529                         cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1530                         if (cur_pipe_ctx->surface == pipe_ctx->surface)
1531                                 is_new_pipe_surface = false;
1532
1533                         if (is_new_pipe_surface ||
1534                                         updates[i].in_transfer_func)
1535                                 core_dc->hwss.set_input_transfer_func(
1536                                                 pipe_ctx, pipe_ctx->surface);
1537
1538                         if (is_new_pipe_surface ||
1539                                         updates[i].out_transfer_func)
1540                                 core_dc->hwss.set_output_transfer_func(
1541                                                 pipe_ctx,
1542                                                 pipe_ctx->surface,
1543                                                 pipe_ctx->stream);
1544
1545                         if (updates[i].hdr_static_metadata) {
1546                                 resource_build_info_frame(pipe_ctx);
1547                                 core_dc->hwss.update_info_frame(pipe_ctx);
1548                         }
1549                 }
1550         }
1551
1552         if (update_type == UPDATE_TYPE_FAST)
1553                 return;
1554
1555         for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1556                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1557
1558                 for (j = 0; j < surface_count; j++) {
1559                         if (updates[j].surface == &pipe_ctx->surface->public) {
1560                                 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1561                                         core_dc->hwss.pipe_control_lock(
1562                                                         core_dc->hwseq,
1563                                                         pipe_ctx->pipe_idx,
1564                                                         PIPE_LOCK_CONTROL_GRAPHICS |
1565                                                         PIPE_LOCK_CONTROL_SCL |
1566                                                         PIPE_LOCK_CONTROL_BLENDER,
1567                                                         false);
1568                                 }
1569                                 break;
1570                         }
1571                 }
1572         }
1573
1574         if (core_dc->current_context != context) {
1575                 resource_validate_ctx_destruct(core_dc->current_context);
1576                 core_dc->temp_flip_context = core_dc->current_context;
1577
1578                 core_dc->current_context = context;
1579         }
1580 }
1581
1582 uint8_t dc_get_current_stream_count(const struct dc *dc)
1583 {
1584         struct core_dc *core_dc = DC_TO_CORE(dc);
1585         return core_dc->current_context->stream_count;
1586 }
1587
1588 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1589 {
1590         struct core_dc *core_dc = DC_TO_CORE(dc);
1591         if (i < core_dc->current_context->stream_count)
1592                 return &(core_dc->current_context->streams[i]->public);
1593         return NULL;
1594 }
1595
1596 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1597 {
1598         struct core_dc *core_dc = DC_TO_CORE(dc);
1599         return &core_dc->links[link_index]->public;
1600 }
1601
1602 const struct graphics_object_id dc_get_link_id_at_index(
1603         struct dc *dc, uint32_t link_index)
1604 {
1605         struct core_dc *core_dc = DC_TO_CORE(dc);
1606         return core_dc->links[link_index]->link_id;
1607 }
1608
1609 const struct ddc_service *dc_get_ddc_at_index(
1610         struct dc *dc, uint32_t link_index)
1611 {
1612         struct core_dc *core_dc = DC_TO_CORE(dc);
1613         return core_dc->links[link_index]->ddc;
1614 }
1615
1616 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1617         struct dc *dc, uint32_t link_index)
1618 {
1619         struct core_dc *core_dc = DC_TO_CORE(dc);
1620         return core_dc->links[link_index]->public.irq_source_hpd;
1621 }
1622
1623 const struct audio **dc_get_audios(struct dc *dc)
1624 {
1625         struct core_dc *core_dc = DC_TO_CORE(dc);
1626         return (const struct audio **)core_dc->res_pool->audios;
1627 }
1628
1629 void dc_flip_surface_addrs(
1630                 struct dc *dc,
1631                 const struct dc_surface *const surfaces[],
1632                 struct dc_flip_addrs flip_addrs[],
1633                 uint32_t count)
1634 {
1635         struct core_dc *core_dc = DC_TO_CORE(dc);
1636         int i, j;
1637
1638         for (i = 0; i < count; i++) {
1639                 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1640
1641                 surface->public.address = flip_addrs[i].address;
1642                 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1643
1644                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1645                         struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1646
1647                         if (pipe_ctx->surface != surface)
1648                                 continue;
1649
1650                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1651                 }
1652         }
1653 }
1654
1655 enum dc_irq_source dc_interrupt_to_irq_source(
1656                 struct dc *dc,
1657                 uint32_t src_id,
1658                 uint32_t ext_id)
1659 {
1660         struct core_dc *core_dc = DC_TO_CORE(dc);
1661         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1662 }
1663
1664 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1665 {
1666         struct core_dc *core_dc = DC_TO_CORE(dc);
1667         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1668 }
1669
1670 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1671 {
1672         struct core_dc *core_dc = DC_TO_CORE(dc);
1673         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1674 }
1675
/*
 * Transition display hardware between ACPI power states.
 *
 * D0: re-initialize the hardware.
 * Any other state: tear everything down — release all streams, power the
 * hardware off, and wipe the committed context so that resume starts from
 * a clean slate. The order of these steps matters and must not change.
 */
void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state,
	enum dc_video_power_state video_power_state)
{
	struct core_dc *core_dc = DC_TO_CORE(dc);

	/* remember where we came from so resume logic can inspect it */
	core_dc->previous_power_state = core_dc->current_power_state;
	core_dc->current_power_state = video_power_state;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		core_dc->hwss.init_hw(core_dc);
		break;
	default:
		/* NULL means "reset/release all DC streams" */
		dc_commit_streams(dc, NULL, 0);

		core_dc->hwss.power_down(core_dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */
		memset(core_dc->current_context, 0,
				sizeof(*core_dc->current_context));

		/* the memset above also cleared the pool back-pointer;
		 * restore it so the context remains usable */
		core_dc->current_context->res_ctx.pool = core_dc->res_pool;

		break;
	}

}
1709
1710 void dc_resume(const struct dc *dc)
1711 {
1712         struct core_dc *core_dc = DC_TO_CORE(dc);
1713
1714         uint32_t i;
1715
1716         for (i = 0; i < core_dc->link_count; i++)
1717                 core_link_resume(core_dc->links[i]);
1718 }
1719
1720 bool dc_read_dpcd(
1721                 struct dc *dc,
1722                 uint32_t link_index,
1723                 uint32_t address,
1724                 uint8_t *data,
1725                 uint32_t size)
1726 {
1727         struct core_dc *core_dc = DC_TO_CORE(dc);
1728
1729         struct core_link *link = core_dc->links[link_index];
1730         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1731                         link->ddc,
1732                         address,
1733                         data,
1734                         size);
1735         return r == DDC_RESULT_SUCESSFULL;
1736 }
1737
1738 bool dc_query_ddc_data(
1739                 struct dc *dc,
1740                 uint32_t link_index,
1741                 uint32_t address,
1742                 uint8_t *write_buf,
1743                 uint32_t write_size,
1744                 uint8_t *read_buf,
1745                 uint32_t read_size) {
1746
1747         struct core_dc *core_dc = DC_TO_CORE(dc);
1748
1749         struct core_link *link = core_dc->links[link_index];
1750
1751         bool result = dal_ddc_service_query_ddc_data(
1752                         link->ddc,
1753                         address,
1754                         write_buf,
1755                         write_size,
1756                         read_buf,
1757                         read_size);
1758
1759         return result;
1760 }
1761
1762
1763 bool dc_write_dpcd(
1764                 struct dc *dc,
1765                 uint32_t link_index,
1766                 uint32_t address,
1767                 const uint8_t *data,
1768                 uint32_t size)
1769 {
1770         struct core_dc *core_dc = DC_TO_CORE(dc);
1771
1772         struct core_link *link = core_dc->links[link_index];
1773
1774         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1775                         link->ddc,
1776                         address,
1777                         data,
1778                         size);
1779         return r == DDC_RESULT_SUCESSFULL;
1780 }
1781
1782 bool dc_submit_i2c(
1783                 struct dc *dc,
1784                 uint32_t link_index,
1785                 struct i2c_command *cmd)
1786 {
1787         struct core_dc *core_dc = DC_TO_CORE(dc);
1788
1789         struct core_link *link = core_dc->links[link_index];
1790         struct ddc_service *ddc = link->ddc;
1791
1792         return dal_i2caux_submit_i2c_command(
1793                 ddc->ctx->i2caux,
1794                 ddc->ddc_pin,
1795                 cmd);
1796 }
1797
1798 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1799 {
1800         struct dc_link *dc_link = &core_link->public;
1801
1802         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1803                 BREAK_TO_DEBUGGER();
1804                 return false;
1805         }
1806
1807         dc_sink_retain(sink);
1808
1809         dc_link->remote_sinks[dc_link->sink_count] = sink;
1810         dc_link->sink_count++;
1811
1812         return true;
1813 }
1814
1815 struct dc_sink *dc_link_add_remote_sink(
1816                 const struct dc_link *link,
1817                 const uint8_t *edid,
1818                 int len,
1819                 struct dc_sink_init_data *init_data)
1820 {
1821         struct dc_sink *dc_sink;
1822         enum dc_edid_status edid_status;
1823         struct core_link *core_link = DC_LINK_TO_LINK(link);
1824
1825         if (len > MAX_EDID_BUFFER_SIZE) {
1826                 dm_error("Max EDID buffer size breached!\n");
1827                 return NULL;
1828         }
1829
1830         if (!init_data) {
1831                 BREAK_TO_DEBUGGER();
1832                 return NULL;
1833         }
1834
1835         if (!init_data->link) {
1836                 BREAK_TO_DEBUGGER();
1837                 return NULL;
1838         }
1839
1840         dc_sink = dc_sink_create(init_data);
1841
1842         if (!dc_sink)
1843                 return NULL;
1844
1845         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1846         dc_sink->dc_edid.length = len;
1847
1848         if (!link_add_remote_sink_helper(
1849                         core_link,
1850                         dc_sink))
1851                 goto fail_add_sink;
1852
1853         edid_status = dm_helpers_parse_edid_caps(
1854                         core_link->ctx,
1855                         &dc_sink->dc_edid,
1856                         &dc_sink->edid_caps);
1857
1858         if (edid_status != EDID_OK)
1859                 goto fail;
1860
1861         return dc_sink;
1862 fail:
1863         dc_link_remove_remote_sink(link, dc_sink);
1864 fail_add_sink:
1865         dc_sink_release(dc_sink);
1866         return NULL;
1867 }
1868
1869 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1870 {
1871         struct core_link *core_link = DC_LINK_TO_LINK(link);
1872         struct dc_link *dc_link = &core_link->public;
1873
1874         dc_link->local_sink = sink;
1875
1876         if (sink == NULL) {
1877                 dc_link->type = dc_connection_none;
1878         } else {
1879                 dc_link->type = dc_connection_single;
1880         }
1881 }
1882
1883 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1884 {
1885         int i;
1886         struct core_link *core_link = DC_LINK_TO_LINK(link);
1887         struct dc_link *dc_link = &core_link->public;
1888
1889         if (!link->sink_count) {
1890                 BREAK_TO_DEBUGGER();
1891                 return;
1892         }
1893
1894         for (i = 0; i < dc_link->sink_count; i++) {
1895                 if (dc_link->remote_sinks[i] == sink) {
1896                         dc_sink_release(sink);
1897                         dc_link->remote_sinks[i] = NULL;
1898
1899                         /* shrink array to remove empty place */
1900                         while (i < dc_link->sink_count - 1) {
1901                                 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1902                                 i++;
1903                         }
1904
1905                         dc_link->sink_count--;
1906                         return;
1907                 }
1908         }
1909 }
1910