drm/amd/display: freesync pipe split: VTotal_Min_Mask for Hflip/lock
drivers/gpu/drm/amd/display/dc/core/dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "bandwidth_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (NULL != dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
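/*
 * Create one core_link per physical connector reported by the VBIOS, plus
 * the requested number of virtual links.  Virtual links have no physical
 * connector and are backed by a virtual link encoder.
 */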
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct core_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 link_init_params.connector_index = i;
101                 link_init_params.link_index = dc->link_count;
102                 link_init_params.dc = dc;
103                 link = link_create(&link_init_params);
104
105                 if (link) {
106                         dc->links[dc->link_count] = link;
107                         link->dc = dc;
108                         ++dc->link_count;
109                 } else {
110                         dm_error("DC: failed to create link!\n");
111                 }
112         }
113
114         for (i = 0; i < num_virtual_links; i++) {
115                 struct core_link *link = dm_alloc(sizeof(*link));
116                 struct encoder_init_data enc_init = {0};
117
118                 if (link == NULL) {
119                         BREAK_TO_DEBUGGER();
120                         goto failed_alloc;
121                 }
122
123                 link->ctx = dc->ctx;
124                 link->dc = dc;
125                 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
126                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
127                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
128                 link->link_id.enum_id = ENUM_ID_1;
129                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
130
131                 enc_init.ctx = dc->ctx;
132                 enc_init.channel = CHANNEL_ID_UNKNOWN;
133                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
134                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
135                 enc_init.connector = link->link_id;
136                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
137                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
138                 enc_init.encoder.enum_id = ENUM_ID_1;
139                 virtual_link_encoder_construct(link->link_enc, &enc_init);
140
141                 link->public.link_index = dc->link_count;
142                 dc->links[dc->link_count] = link;
143                 dc->link_count++;
144         }
145
146         return true;
147
148 failed_alloc:
149         return false;
150 }
151
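/*
 * Adjust the DRR (variable refresh) V_TOTAL min/max on every pipe that
 * drives the given stream, then rebuild and resend the info frame,
 * presumably so the sink sees the updated range.  Only streams[0] is
 * honoured for now (see the TODO below).
 */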
152 static bool stream_adjust_vmin_vmax(struct dc *dc,
153                 const struct dc_stream **stream, int num_streams,
154                 int vmin, int vmax)
155 {
156         /* TODO: Support multiple streams */
157         struct core_dc *core_dc = DC_TO_CORE(dc);
158         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
159         int i = 0;
160         bool ret = false;
161
162         for (i = 0; i < MAX_PIPES; i++) {
163                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
164
165                 if (pipe->stream == core_stream && pipe->stream_enc) {
166                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
167
168                         /* build and update the info frame */
169                         resource_build_info_frame(pipe);
170                         core_dc->hwss.update_info_frame(pipe);
171
172                         ret = true;
173                 }
174         }
175         return ret;
176 }
177
178
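/*
 * Re-run set_plane_config on every pipe that drives the stream; the gamut
 * remap itself is presumably picked up from the already-updated stream and
 * surface state rather than being passed in here.
 */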
179 static bool set_gamut_remap(struct dc *dc,
180                         const struct dc_stream **stream, int num_streams)
181 {
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
184         int i = 0;
185         bool ret = false;
186         struct pipe_ctx *pipes;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
190                                 == core_stream) {
191
192                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
193                         core_dc->hwss.set_plane_config(core_dc, pipes,
194                                         &core_dc->current_context->res_ctx);
195                         ret = true;
196                 }
197         }
198
199         return ret;
200 }
201
202 /* This function is not expected to fail; proper implementation of
203  * validation will prevent this from ever being called for unsupported
204  * configurations.
205  */
206 static void stream_update_scaling(
207                 const struct dc *dc,
208                 const struct dc_stream *dc_stream,
209                 const struct rect *src,
210                 const struct rect *dst)
211 {
212         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
213         struct core_dc *core_dc = DC_TO_CORE(dc);
214         struct validate_context *cur_ctx = core_dc->current_context;
215         int i;
216
217         if (src)
218                 stream->public.src = *src;
219
220         if (dst)
221                 stream->public.dst = *dst;
222
223         for (i = 0; i < cur_ctx->stream_count; i++) {
224                 struct core_stream *cur_stream = cur_ctx->streams[i];
225
226                 if (stream == cur_stream) {
227                         struct dc_stream_status *status = &cur_ctx->stream_status[i];
228
229                         if (status->surface_count)
230                                 if (!dc_commit_surfaces_to_stream(
231                                                 &core_dc->public,
232                                                 status->surfaces,
233                                                 status->surface_count,
234                                                 &cur_stream->public))
235                                         /* Need to debug validation */
236                                         BREAK_TO_DEBUGGER();
237
238                         return;
239                 }
240         }
241 }
242
243 static bool set_psr_enable(struct dc *dc, bool enable)
244 {
245         struct core_dc *core_dc = DC_TO_CORE(dc);
246         int i;
247
248         for (i = 0; i < core_dc->link_count; i++)
249                 dc_link_set_psr_enable(&core_dc->links[i]->public,
250                                 enable);
251
252         return true;
253 }
254
255
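/*
 * Configure PSR on the link that drives the given stream and program the
 * static screen event triggers (hard-coded mask 0x182 here) on every
 * non-underlay pipe attached to that stream.
 */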
256 static bool setup_psr(struct dc *dc, const struct dc_stream *stream)
257 {
258         struct core_dc *core_dc = DC_TO_CORE(dc);
259         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
260         struct pipe_ctx *pipes;
261         int i;
262         unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
263
264         for (i = 0; i < core_dc->link_count; i++) {
265                 if (core_stream->sink->link == core_dc->links[i])
266                         dc_link_setup_psr(&core_dc->links[i]->public,
267                                         stream);
268         }
269
270         for (i = 0; i < MAX_PIPES; i++) {
271                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
272                                 == core_stream && i != underlay_idx) {
273                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
274                         core_dc->hwss.set_static_screen_control(&pipes, 1,
275                                         0x182);
276                 }
277         }
278
279         return true;
280 }
281
282 static void set_drive_settings(struct dc *dc,
283                 struct link_training_settings *lt_settings,
284                 const struct dc_link *link)
285 {
286         struct core_dc *core_dc = DC_TO_CORE(dc);
287         int i;
288
289         for (i = 0; i < core_dc->link_count; i++) {
290                 if (&core_dc->links[i]->public == link)
291                         break;
292         }
293
294         if (i >= core_dc->link_count)
295                 ASSERT_CRITICAL(false);
296
297         dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
298 }
299
300 static void perform_link_training(struct dc *dc,
301                 struct dc_link_settings *link_setting,
302                 bool skip_video_pattern)
303 {
304         struct core_dc *core_dc = DC_TO_CORE(dc);
305         int i;
306
307         for (i = 0; i < core_dc->link_count; i++)
308                 dc_link_dp_perform_link_training(
309                         &core_dc->links[i]->public,
310                         link_setting,
311                         skip_video_pattern);
312 }
313
314 static void set_preferred_link_settings(struct dc *dc,
315                 struct dc_link_settings *link_setting,
316                 const struct dc_link *link)
317 {
318         struct core_link *core_link = DC_LINK_TO_CORE(link);
319
320         core_link->public.verified_link_cap.lane_count =
321                                 link_setting->lane_count;
322         core_link->public.verified_link_cap.link_rate =
323                                 link_setting->link_rate;
324         dp_retrain_link_dp_test(core_link, link_setting, false);
325 }
326
327 static void enable_hpd(const struct dc_link *link)
328 {
329         dc_link_dp_enable_hpd(link);
330 }
331
332 static void disable_hpd(const struct dc_link *link)
333 {
334         dc_link_dp_disable_hpd(link);
335 }
336
337
338 static void set_test_pattern(
339                 const struct dc_link *link,
340                 enum dp_test_pattern test_pattern,
341                 const struct link_training_settings *p_link_settings,
342                 const unsigned char *p_custom_pattern,
343                 unsigned int cust_pattern_size)
344 {
345         if (link != NULL)
346                 dc_link_dp_set_test_pattern(
347                         link,
348                         test_pattern,
349                         p_link_settings,
350                         p_custom_pattern,
351                         cust_pattern_size);
352 }
353
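/*
 * Hook the public stream/link function tables up to the static helpers
 * above.  adjust_vmin_vmax is only exposed when the hardware sequencer
 * actually implements set_drr.
 */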
354 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
355 {
356         core_dc->public.stream_funcs.stream_update_scaling = stream_update_scaling;
357         if (core_dc->hwss.set_drr != NULL) {
358                 core_dc->public.stream_funcs.adjust_vmin_vmax =
359                                 stream_adjust_vmin_vmax;
360         }
361
362         core_dc->public.stream_funcs.set_gamut_remap =
363                         set_gamut_remap;
364
365         core_dc->public.stream_funcs.set_psr_enable =
366                         set_psr_enable;
367
368         core_dc->public.stream_funcs.setup_psr =
369                         setup_psr;
370
371         core_dc->public.link_funcs.set_drive_settings =
372                         set_drive_settings;
373
374         core_dc->public.link_funcs.perform_link_training =
375                         perform_link_training;
376
377         core_dc->public.link_funcs.set_preferred_link_settings =
378                         set_preferred_link_settings;
379
380         core_dc->public.link_funcs.enable_hpd =
381                         enable_hpd;
382
383         core_dc->public.link_funcs.disable_hpd =
384                         disable_hpd;
385
386         core_dc->public.link_funcs.set_test_pattern =
387                         set_test_pattern;
388 }
389
390 static void destruct(struct core_dc *dc)
391 {
392         resource_validate_ctx_destruct(dc->current_context);
393
394         destroy_links(dc);
395
396         dc_destroy_resource_pool(dc);
397
398         if (dc->ctx->gpio_service)
399                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
400
401         if (dc->ctx->i2caux)
402                 dal_i2caux_destroy(&dc->ctx->i2caux);
403
404         if (dc->ctx->created_bios)
405                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
406
407         if (dc->ctx->logger)
408                 dal_logger_destroy(&dc->ctx->logger);
409
410         dm_free(dc->current_context);
411         dc->current_context = NULL;
412         dm_free(dc->temp_flip_context);
413         dc->temp_flip_context = NULL;
414         dm_free(dc->scratch_val_ctx);
415         dc->scratch_val_ctx = NULL;
416
417         dm_free(dc->ctx);
418         dc->ctx = NULL;
419 }
420
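/*
 * Construct the core_dc: allocate the validate contexts, create the logger,
 * BIOS parser, I2C AUX and GPIO services, then the ASIC-specific resource
 * pool and the links.  Any failure falls through the shared labels below
 * and unwinds via destruct().
 */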
421 static bool construct(struct core_dc *dc,
422                 const struct dc_init_data *init_params)
423 {
424         struct dal_logger *logger;
425         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
426         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
427
428         if (!dc_ctx) {
429                 dm_error("%s: failed to create ctx\n", __func__);
430                 goto ctx_fail;
431         }
432
433         dc->current_context = dm_alloc(sizeof(*dc->current_context));
434         dc->temp_flip_context = dm_alloc(sizeof(*dc->temp_flip_context));
435         dc->scratch_val_ctx = dm_alloc(sizeof(*dc->scratch_val_ctx));
436
437         if (!dc->current_context || !dc->temp_flip_context || !dc->scratch_val_ctx) {
438                 dm_error("%s: failed to create validate ctx\n", __func__);
439                 goto val_ctx_fail;
440         }
441
442         dc_ctx->cgs_device = init_params->cgs_device;
443         dc_ctx->driver_context = init_params->driver;
444         dc_ctx->dc = &dc->public;
445         dc_ctx->asic_id = init_params->asic_id;
446
447         /* Create logger */
448         logger = dal_logger_create(dc_ctx);
449
450         if (!logger) {
451                 /* can *not* call logger. call base driver 'print error' */
452                 dm_error("%s: failed to create Logger!\n", __func__);
453                 goto logger_fail;
454         }
455         dc_ctx->logger = logger;
456         dc->ctx = dc_ctx;
457         dc->ctx->dce_environment = init_params->dce_environment;
458
459         dc_version = resource_parse_asic_id(init_params->asic_id);
460         dc->ctx->dce_version = dc_version;
461
462         /* Resource should construct all asic specific resources.
463          * This should be the only place where we need to parse the asic id
464          */
465         if (init_params->vbios_override)
466                 dc_ctx->dc_bios = init_params->vbios_override;
467         else {
468                 /* Create BIOS parser */
469                 struct bp_init_data bp_init_data;
470                 bp_init_data.ctx = dc_ctx;
471                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
472
473                 dc_ctx->dc_bios = dal_bios_parser_create(
474                                 &bp_init_data, dc_version);
475
476                 if (!dc_ctx->dc_bios) {
477                         ASSERT_CRITICAL(false);
478                         goto bios_fail;
479                 }
480
481                 dc_ctx->created_bios = true;
482         }
483
484         /* Create I2C AUX */
485         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
486
487         if (!dc_ctx->i2caux) {
488                 ASSERT_CRITICAL(false);
489                 goto failed_to_create_i2caux;
490         }
491
492         /* Create GPIO service */
493         dc_ctx->gpio_service = dal_gpio_service_create(
494                         dc_version,
495                         dc_ctx->dce_environment,
496                         dc_ctx);
497
498         if (!dc_ctx->gpio_service) {
499                 ASSERT_CRITICAL(false);
500                 goto gpio_fail;
501         }
502
503         dc->res_pool = dc_create_resource_pool(
504                         dc,
505                         init_params->num_virtual_links,
506                         dc_version,
507                         init_params->asic_id);
508         if (!dc->res_pool)
509                 goto create_resource_fail;
510
511         if (!create_links(dc, init_params->num_virtual_links))
512                 goto create_links_fail;
513
514         allocate_dc_stream_funcs(dc);
515
516         return true;
517
518         /**** error handling here ****/
519 create_links_fail:
520 create_resource_fail:
521 gpio_fail:
522 failed_to_create_i2caux:
523 bios_fail:
524 logger_fail:
525 val_ctx_fail:
526 ctx_fail:
527         destruct(dc);
528         return false;
529 }
530
531 /*
532 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
533 {
534         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
535         unsigned int pixDurationInPico = round(pixel_duration);
536
537         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
538
539         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
540         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
541         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
542
543         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
544         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
545         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
546
547         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
548         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
549
550         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
551         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
552 }
553 */
554
555 /*******************************************************************************
556  * Public functions
557  ******************************************************************************/
558
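/*
 * Allocate and construct the core_dc, initialize the hardware, and derive
 * the public caps: max_streams is bounded by the number of usable pipes
 * (excluding the underlay pipe, if any) and the stream encoder count.
 */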
559 struct dc *dc_create(const struct dc_init_data *init_params)
560 {
561         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
562         unsigned int full_pipe_count;
563
564         if (NULL == core_dc)
565                 goto alloc_fail;
566
567         if (false == construct(core_dc, init_params))
568                 goto construct_fail;
569
570         /*TODO: separate HW and SW initialization*/
571         core_dc->hwss.init_hw(core_dc);
572
573         full_pipe_count = core_dc->res_pool->pipe_count;
574         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
575                 full_pipe_count--;
576         core_dc->public.caps.max_streams = min(
577                         full_pipe_count,
578                         core_dc->res_pool->stream_enc_count);
579
580         core_dc->public.caps.max_links = core_dc->link_count;
581         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
582
583         core_dc->public.config = init_params->flags;
584
585         dm_logger_write(core_dc->ctx->logger, LOG_DC,
586                         "Display Core initialized\n");
587
588
589         /* TODO: missing feature to be enabled */
590         core_dc->public.debug.disable_dfs_bypass = true;
591
592         return &core_dc->public;
593
594 construct_fail:
595         dm_free(core_dc);
596
597 alloc_fail:
598         return NULL;
599 }
600
601 void dc_destroy(struct dc **dc)
602 {
603         struct core_dc *core_dc = DC_TO_CORE(*dc);
604         destruct(core_dc);
605         dm_free(core_dc);
606         *dc = NULL;
607 }
608
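/*
 * Compare the requested validation set against the current context.  Full
 * validation is only required when the stream count, a stream's timing, a
 * per-stream surface count, or any surface field other than the clip
 * rectangle and destination position has changed.
 */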
609 static bool is_validation_required(
610                 const struct core_dc *dc,
611                 const struct dc_validation_set set[],
612                 int set_count)
613 {
614         const struct validate_context *context = dc->current_context;
615         int i, j;
616
617         if (context->stream_count != set_count)
618                 return true;
619
620         for (i = 0; i < set_count; i++) {
621
622                 if (set[i].surface_count != context->stream_status[i].surface_count)
623                         return true;
624                 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
625                         return true;
626
627                 for (j = 0; j < set[i].surface_count; j++) {
628                         struct dc_surface temp_surf = { 0 };
629
630                         temp_surf = *context->stream_status[i].surfaces[j];
631                         temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
632                         temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
633                         temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
634
635                         if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
636                                 return true;
637                 }
638         }
639
640         return false;
641 }
642
643 bool dc_validate_resources(
644                 const struct dc *dc,
645                 const struct dc_validation_set set[],
646                 uint8_t set_count)
647 {
648         struct core_dc *core_dc = DC_TO_CORE(dc);
649         enum dc_status result = DC_ERROR_UNEXPECTED;
650         struct validate_context *context;
651
652         if (!is_validation_required(core_dc, set, set_count))
653                 return true;
654
655         context = dm_alloc(sizeof(struct validate_context));
656         if(context == NULL)
657                 goto context_alloc_fail;
658
659         result = core_dc->res_pool->funcs->validate_with_context(
660                                                 core_dc, set, set_count, context);
661
662         resource_validate_ctx_destruct(context);
663         dm_free(context);
664
665 context_alloc_fail:
666         if (result != DC_OK) {
667                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
668                                 "%s:resource validation failed, dc_status:%d\n",
669                                 __func__,
670                                 result);
671         }
672
673         return (result == DC_OK);
674
675 }
676
677 bool dc_validate_guaranteed(
678                 const struct dc *dc,
679                 const struct dc_stream *stream)
680 {
681         struct core_dc *core_dc = DC_TO_CORE(dc);
682         enum dc_status result = DC_ERROR_UNEXPECTED;
683         struct validate_context *context;
684
685         context = dm_alloc(sizeof(struct validate_context));
686         if (context == NULL)
687                 goto context_alloc_fail;
688
689         result = core_dc->res_pool->funcs->validate_guaranteed(
690                                         core_dc, stream, context);
691
692         resource_validate_ctx_destruct(context);
693         dm_free(context);
694
695 context_alloc_fail:
696         if (result != DC_OK) {
697                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
698                         "%s:guaranteed validation failed, dc_status:%d\n",
699                         __func__,
700                         result);
701         }
702
703         return (result == DC_OK);
704 }
705
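/*
 * Group top-level pipes whose stream timings are synchronizable, pick an
 * unblanked pipe as master of each group, drop any other already-running
 * (unblanked) pipes from the group, and hand each multi-pipe group to the
 * hardware sequencer for timing synchronization.
 */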
706 static void program_timing_sync(
707                 struct core_dc *core_dc,
708                 struct validate_context *ctx)
709 {
710         int i, j;
711         int group_index = 0;
712         int pipe_count = ctx->res_ctx.pool->pipe_count;
713         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
714
715         for (i = 0; i < pipe_count; i++) {
716                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
717                         continue;
718
719                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
720         }
721
722         for (i = 0; i < pipe_count; i++) {
723                 int group_size = 1;
724                 struct pipe_ctx *pipe_set[MAX_PIPES];
725
726                 if (!unsynced_pipes[i])
727                         continue;
728
729                 pipe_set[0] = unsynced_pipes[i];
730                 unsynced_pipes[i] = NULL;
731
732                 /* Add tg to the set, then search the remaining tgs for ones
733                  * with the same timing and add them to the group
734                  */
735                 for (j = i + 1; j < pipe_count; j++) {
736                         if (!unsynced_pipes[j])
737                                 continue;
738
739                         if (resource_are_streams_timing_synchronizable(
740                                         unsynced_pipes[j]->stream,
741                                         pipe_set[0]->stream)) {
742                                 pipe_set[group_size] = unsynced_pipes[j];
743                                 unsynced_pipes[j] = NULL;
744                                 group_size++;
745                         }
746                 }
747
748                 /* set first unblanked pipe as master */
749                 for (j = 0; j < group_size; j++) {
750                         struct pipe_ctx *temp;
751
752                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
753                                 if (j == 0)
754                                         break;
755
756                                 temp = pipe_set[0];
757                                 pipe_set[0] = pipe_set[j];
758                                 pipe_set[j] = temp;
759                                 break;
760                         }
761                 }
762
763                 /* remove any other unblanked pipes as they have already been synced */
764                 for (j = j + 1; j < group_size; j++) {
765                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
766                                 group_size--;
767                                 pipe_set[j] = pipe_set[group_size];
768                                 j--;
769                         }
770                 }
771
772                 if (group_size > 1) {
773                         core_dc->hwss.enable_timing_synchronization(
774                                 core_dc, group_index, group_size, pipe_set);
775                         group_index++;
776                 }
777         }
778 }
779
780 static bool streams_changed(
781                 struct core_dc *dc,
782                 const struct dc_stream *streams[],
783                 uint8_t stream_count)
784 {
785         uint8_t i;
786
787         if (stream_count != dc->current_context->stream_count)
788                 return true;
789
790         for (i = 0; i < dc->current_context->stream_count; i++) {
791                 if (&dc->current_context->streams[i]->public != streams[i])
792                         return true;
793         }
794
795         return false;
796 }
797
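/*
 * Commit a new set of streams: validate them into a fresh context, apply
 * the context to hardware, synchronize timings across pipes, unblank the
 * pipes that have a visible surface, then retire the previous context and
 * make the new one current.  Returns early if the stream set is unchanged.
 */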
798 bool dc_commit_streams(
799         struct dc *dc,
800         const struct dc_stream *streams[],
801         uint8_t stream_count)
802 {
803         struct core_dc *core_dc = DC_TO_CORE(dc);
804         struct dc_bios *dcb = core_dc->ctx->dc_bios;
805         enum dc_status result = DC_ERROR_UNEXPECTED;
806         struct validate_context *context;
807         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
808         int i, j, k;
809
810         if (!streams_changed(core_dc, streams, stream_count))
811                 return true;
812
813         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
814                                 __func__, stream_count);
815
816         for (i = 0; i < stream_count; i++) {
817                 const struct dc_stream *stream = streams[i];
818                 const struct dc_stream_status *status = dc_stream_get_status(stream);
819                 int j;
820
821                 dc_stream_log(stream,
822                                 core_dc->ctx->logger,
823                                 LOG_DC);
824
825                 set[i].stream = stream;
826
827                 if (status) {
828                         set[i].surface_count = status->surface_count;
829                         for (j = 0; j < status->surface_count; j++)
830                                 set[i].surfaces[j] = status->surfaces[j];
831                 }
832
833         }
834
835         context = dm_alloc(sizeof(struct validate_context));
836         if (context == NULL)
837                 goto context_alloc_fail;
838
839         result = core_dc->res_pool->funcs->validate_with_context(core_dc, set, stream_count, context);
840         if (result != DC_OK) {
841                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
842                                         "%s: Context validation failed! dc_status:%d\n",
843                                         __func__,
844                                         result);
845                 BREAK_TO_DEBUGGER();
846                 resource_validate_ctx_destruct(context);
847                 goto fail;
848         }
849
850         if (!dcb->funcs->is_accelerated_mode(dcb)) {
851                 core_dc->hwss.enable_accelerated_mode(core_dc);
852         }
853
854         if (result == DC_OK) {
855                 result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
856         }
857
858         program_timing_sync(core_dc, context);
859
860         for (i = 0; i < context->stream_count; i++) {
861                 const struct core_sink *sink = context->streams[i]->sink;
862
863                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
864                         const struct dc_surface *dc_surface =
865                                         context->stream_status[i].surfaces[j];
866
867                         for (k = 0; k < context->res_ctx.pool->pipe_count; k++) {
868                                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[k];
869
870                                 if (dc_surface != &pipe->surface->public
871                                                 || !dc_surface->visible)
872                                         continue;
873
874                                 pipe->tg->funcs->set_blank(pipe->tg, false);
875                         }
876                 }
877
878                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
879                                 context->streams[i]->public.timing.h_addressable,
880                                 context->streams[i]->public.timing.v_addressable,
881                                 context->streams[i]->public.timing.h_total,
882                                 context->streams[i]->public.timing.v_total,
883                                 context->streams[i]->public.timing.pix_clk_khz);
884         }
885
886         resource_validate_ctx_destruct(core_dc->current_context);
887
888         if (core_dc->temp_flip_context != core_dc->current_context) {
889                 dm_free(core_dc->temp_flip_context);
890                 core_dc->temp_flip_context = core_dc->current_context;
891         }
892         core_dc->current_context = context;
893         memset(core_dc->temp_flip_context, 0, sizeof(*core_dc->temp_flip_context));
894
895         return (result == DC_OK);
896
897 fail:
898         dm_free(context);
899
900 context_alloc_fail:
901         return (result == DC_OK);
902 }
903
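/*
 * Prepare for a surface update: build a candidate context with the new
 * surfaces attached, recompute scaling parameters, validate bandwidth and
 * raise clocks/bandwidth, and prepare the affected pipes.  The heavy work
 * is skipped when the new surfaces differ from the current ones only in
 * clip rectangle and destination position, and fails when the target
 * stream has not been committed.
 */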
904 bool dc_pre_update_surfaces_to_stream(
905                 struct dc *dc,
906                 const struct dc_surface *const *new_surfaces,
907                 uint8_t new_surface_count,
908                 const struct dc_stream *dc_stream)
909 {
910         int i, j;
911         struct core_dc *core_dc = DC_TO_CORE(dc);
912         struct dc_stream_status *stream_status = NULL;
913         struct validate_context *context;
914         bool ret = true;
915
916         pre_surface_trace(dc, new_surfaces, new_surface_count);
917
918         if (core_dc->current_context->stream_count == 0)
919                 return false;
920
921         /* Cannot commit surface to a stream that is not committed */
922         for (i = 0; i < core_dc->current_context->stream_count; i++)
923                 if (dc_stream == &core_dc->current_context->streams[i]->public)
924                         break;
925
926         if (i == core_dc->current_context->stream_count)
927                 return false;
928
929         stream_status = &core_dc->current_context->stream_status[i];
930
931         if (new_surface_count == stream_status->surface_count) {
932                 bool skip_pre = true;
933
934                 for (i = 0; i < stream_status->surface_count; i++) {
935                         struct dc_surface temp_surf = { 0 };
936
937                         temp_surf = *stream_status->surfaces[i];
938                         temp_surf.clip_rect = new_surfaces[i]->clip_rect;
939                         temp_surf.dst_rect.x = new_surfaces[i]->dst_rect.x;
940                         temp_surf.dst_rect.y = new_surfaces[i]->dst_rect.y;
941
942                         if (memcmp(&temp_surf, new_surfaces[i], sizeof(temp_surf)) != 0) {
943                                 skip_pre = false;
944                                 break;
945                         }
946                 }
947
948                 if (skip_pre)
949                         return true;
950         }
951
952         context = dm_alloc(sizeof(struct validate_context));
953
954         if (!context) {
955                 dm_error("%s: failed to create validate ctx\n", __func__);
956                 ret = false;
957                 goto val_ctx_fail;
958         }
959
960         resource_validate_ctx_copy_construct(core_dc->current_context, context);
961
962         dm_logger_write(core_dc->ctx->logger, LOG_DC,
963                                 "%s: commit %d surfaces to stream 0x%x\n",
964                                 __func__,
965                                 new_surface_count,
966                                 dc_stream);
967
968         if (!resource_attach_surfaces_to_context(
969                         new_surfaces, new_surface_count, dc_stream, context)) {
970                 BREAK_TO_DEBUGGER();
971                 ret = false;
972                 goto unexpected_fail;
973         }
974
975         for (i = 0; i < new_surface_count; i++)
976                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
977                         if (context->res_ctx.pipe_ctx[j].surface !=
978                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
979                                 continue;
980
981                         resource_build_scaling_params(
982                                 new_surfaces[i], &context->res_ctx.pipe_ctx[j]);
983                 }
984
985         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
986                 BREAK_TO_DEBUGGER();
987                 ret = false;
988                 goto unexpected_fail;
989         }
990
991         core_dc->hwss.set_bandwidth(core_dc, context, false);
992
993         for (i = 0; i < new_surface_count; i++)
994                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
995                         if (context->res_ctx.pipe_ctx[j].surface !=
996                                         DC_SURFACE_TO_CORE(new_surfaces[i]))
997                                 continue;
998
999                         core_dc->hwss.prepare_pipe_for_context(
1000                                         core_dc,
1001                                         &context->res_ctx.pipe_ctx[j],
1002                                         context);
1003                 }
1004
1005 unexpected_fail:
1006         resource_validate_ctx_destruct(context);
1007         dm_free(context);
1008 val_ctx_fail:
1009
1010         return ret;
1011 }
1012
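/*
 * Finish a surface update: power down the front ends of pipes that no
 * longer have a stream and lower bandwidth/clocks where possible to match
 * the committed context.  Meant to run after
 * dc_pre_update_surfaces_to_stream() and the update itself.
 */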
1013 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1014 {
1015         int i;
1016         struct core_dc *core_dc = DC_TO_CORE(dc);
1017         struct validate_context *context = dm_alloc(sizeof(struct validate_context));
1018
1019         if (!context) {
1020                 dm_error("%s: failed to create validate ctx\n", __func__);
1021                 return false;
1022         }
1023         resource_validate_ctx_copy_construct(core_dc->current_context, context);
1024
1025         post_surface_trace(dc);
1026
1027         for (i = 0; i < context->res_ctx.pool->pipe_count; i++)
1028                 if (context->res_ctx.pipe_ctx[i].stream == NULL) {
1029                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
1030                         core_dc->hwss.power_down_front_end(
1031                                         core_dc, &context->res_ctx.pipe_ctx[i]);
1032                 }
1033         if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1034                 BREAK_TO_DEBUGGER();
1035                 return false;
1036         }
1037
1038         core_dc->hwss.set_bandwidth(core_dc, context, true);
1039
1040         resource_validate_ctx_destruct(core_dc->current_context);
1041         core_dc->current_context = context;
1042
1043         return true;
1044 }
1045
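/*
 * Convenience wrapper: convert a full surface set into dc_surface_update
 * entries (flip address, plane info, scaling info) and drive the usual
 * dc_pre_update_surfaces_to_stream() / dc_update_surfaces_for_stream() /
 * dc_post_update_surfaces_to_stream() sequence.
 */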
1046 bool dc_commit_surfaces_to_stream(
1047                 struct dc *dc,
1048                 const struct dc_surface **new_surfaces,
1049                 uint8_t new_surface_count,
1050                 const struct dc_stream *dc_stream)
1051 {
1052         struct dc_surface_update updates[MAX_SURFACES];
1053         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1054         struct dc_plane_info plane_info[MAX_SURFACES];
1055         struct dc_scaling_info scaling_info[MAX_SURFACES];
1056         int i;
1057
1058         if (!dc_pre_update_surfaces_to_stream(
1059                         dc, new_surfaces, new_surface_count, dc_stream))
1060                 return false;
1061
1062         memset(updates, 0, sizeof(updates));
1063         memset(flip_addr, 0, sizeof(flip_addr));
1064         memset(plane_info, 0, sizeof(plane_info));
1065         memset(scaling_info, 0, sizeof(scaling_info));
1066
1067         for (i = 0; i < new_surface_count; i++) {
1068                 updates[i].surface = new_surfaces[i];
1069                 updates[i].gamma =
1070                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1071                 flip_addr[i].address = new_surfaces[i]->address;
1072                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1073                 plane_info[i].color_space = new_surfaces[i]->color_space;
1074                 plane_info[i].format = new_surfaces[i]->format;
1075                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1076                 plane_info[i].rotation = new_surfaces[i]->rotation;
1077                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1078                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1079                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1080                 plane_info[i].visible = new_surfaces[i]->visible;
1081                 plane_info[i].dcc = new_surfaces[i]->dcc;
1082                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1083                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1084                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1085                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1086
1087                 updates[i].flip_addr = &flip_addr[i];
1088                 updates[i].plane_info = &plane_info[i];
1089                 updates[i].scaling_info = &scaling_info[i];
1090         }
1091         dc_update_surfaces_for_stream(dc, updates, new_surface_count, dc_stream);
1092
1093         return dc_post_update_surfaces_to_stream(dc);
1094 }
1095
1096 static bool is_surface_in_context(
1097                 const struct validate_context *context,
1098                 const struct dc_surface *surface)
1099 {
1100         int j;
1101
1102         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1103                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1104
1105                 if (surface == &pipe_ctx->surface->public) {
1106                         return true;
1107                 }
1108         }
1109
1110         return false;
1111 }
1112
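/*
 * Classification of a surface update, used to decide how much
 * reprogramming (and which pipe locks) a flip needs.  det_surface_update()
 * below maps a single dc_surface_update to one of these levels and
 * check_update_surfaces_for_stream() takes the worst case over all
 * surfaces in the update.
 */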
1113 enum surface_update_type {
1114         UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
1115         UPDATE_TYPE_MED,  /* a lot of programming needed.  may need to alloc */
1116         UPDATE_TYPE_FULL, /* may need to shuffle resources */
1117 };
1118
1119 static enum surface_update_type det_surface_update(
1120                 const struct core_dc *dc,
1121                 const struct dc_surface_update *u)
1122 {
1123         const struct validate_context *context = dc->current_context;
1124
1125         if (u->scaling_info || u->plane_info)
1126                 /* TODO: not every scaling or plane_info update needs a full update;
1127                  * check whether the following are unchanged:
1128                  * scale ratio, viewport, surface bpp, etc.
1129                  */
1130                 return UPDATE_TYPE_FULL; /* may need bandwidth update */
1131
1132         if (!is_surface_in_context(context, u->surface))
1133                 return UPDATE_TYPE_FULL;
1134
1135         if (u->in_transfer_func ||
1136                 u->out_transfer_func ||
1137                 u->hdr_static_metadata)
1138                 return UPDATE_TYPE_MED;
1139
1140         return UPDATE_TYPE_FAST;
1141 }
1142
1143 static enum surface_update_type check_update_surfaces_for_stream(
1144                 struct core_dc *dc,
1145                 struct dc_surface_update *updates,
1146                 int surface_count,
1147                 const struct dc_stream_status *stream_status)
1148 {
1149         int i;
1150         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1151
1152         if (stream_status->surface_count != surface_count)
1153                 return UPDATE_TYPE_FULL;
1154
1155         for (i = 0 ; i < surface_count; i++) {
1156                 enum surface_update_type type =
1157                                 det_surface_update(dc, &updates[i]);
1158
1159                 if (type == UPDATE_TYPE_FULL)
1160                         return type;
1161
1162                 if (overall_type < type)
1163                         overall_type = type;
1164         }
1165
1166         return overall_type;
1167 }
1168
1169 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1170
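/*
 * Apply a set of surface updates to a committed stream.  The update is
 * first classified; FULL updates are built into a scratch context with the
 * surfaces re-attached and bandwidth re-validated, while FAST/MED updates
 * reuse the current context.  Update parameters are copied into the core
 * surfaces, the affected pipes are programmed under the appropriate pipe
 * locks, the locks are released in reverse pipe order, and for FULL
 * updates the new context is swapped in.
 */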
1171 void dc_update_surfaces_for_stream(struct dc *dc,
1172                 struct dc_surface_update *updates, int surface_count,
1173                 const struct dc_stream *dc_stream)
1174 {
1175         struct core_dc *core_dc = DC_TO_CORE(dc);
1176         struct validate_context *context;
1177         int i, j;
1178
1179         enum surface_update_type update_type;
1180         const struct dc_stream_status *stream_status;
1181         unsigned int lock_mask = 0;
1182
1183         stream_status = dc_stream_get_status(dc_stream);
1184         ASSERT(stream_status);
1185         if (!stream_status)
1186                 return; /* Cannot commit surface to stream that is not committed */
1187
1188         update_type = check_update_surfaces_for_stream(
1189                         core_dc, updates, surface_count, stream_status);
1190
1191         if (update_type >= update_surface_trace_level)
1192                 update_surface_trace(dc, updates, surface_count);
1193
1194         if (update_type >= UPDATE_TYPE_FULL) {
1195                 const struct dc_surface *new_surfaces[MAX_SURFACES] = { 0 };
1196
1197                 for (i = 0; i < surface_count; i++)
1198                         new_surfaces[i] = updates[i].surface;
1199
1200                 /* initialize scratch memory for building context */
1201                 context = core_dc->temp_flip_context;
1202                 resource_validate_ctx_copy_construct(
1203                                 core_dc->current_context, context);
1204
1205                 /* add surface to context */
1206                 if (!resource_attach_surfaces_to_context(
1207                                 new_surfaces, surface_count, dc_stream, context)) {
1208                         BREAK_TO_DEBUGGER();
1209                         return;
1210                 }
1211         } else {
1212                 context = core_dc->current_context;
1213         }
1214         for (i = 0; i < surface_count; i++) {
1215                 /* save update param into surface */
1216                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1217                 struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1218
1219                 if (updates[i].flip_addr) {
1220                         surface->public.address = updates[i].flip_addr->address;
1221                         surface->public.flip_immediate =
1222                                         updates[i].flip_addr->flip_immediate;
1223                 }
1224
1225                 if (updates[i].scaling_info) {
1226                         surface->public.scaling_quality =
1227                                         updates[i].scaling_info->scaling_quality;
1228                         surface->public.dst_rect =
1229                                         updates[i].scaling_info->dst_rect;
1230                         surface->public.src_rect =
1231                                         updates[i].scaling_info->src_rect;
1232                         surface->public.clip_rect =
1233                                         updates[i].scaling_info->clip_rect;
1234                 }
1235
1236                 if (updates[i].plane_info) {
1237                         surface->public.color_space =
1238                                         updates[i].plane_info->color_space;
1239                         surface->public.format =
1240                                         updates[i].plane_info->format;
1241                         surface->public.plane_size =
1242                                         updates[i].plane_info->plane_size;
1243                         surface->public.rotation =
1244                                         updates[i].plane_info->rotation;
1245                         surface->public.horizontal_mirror =
1246                                         updates[i].plane_info->horizontal_mirror;
1247                         surface->public.stereo_format =
1248                                         updates[i].plane_info->stereo_format;
1249                         surface->public.tiling_info =
1250                                         updates[i].plane_info->tiling_info;
1251                         surface->public.visible =
1252                                         updates[i].plane_info->visible;
1253                         surface->public.dcc =
1254                                         updates[i].plane_info->dcc;
1255                 }
1256
1257                 /* TODO: confirm whether rebuilding scaling params here is still needed */
1258                 if (update_type == UPDATE_TYPE_FULL) {
1259                         for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1260                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1261
1262                                 if (pipe_ctx->surface != surface)
1263                                         continue;
1264
1265                                 resource_build_scaling_params(updates[i].surface, pipe_ctx);
1266                         }
1267                 }
1268
1269                 if (updates[i].gamma &&
1270                         updates[i].gamma != surface->public.gamma_correction) {
1271                         if (surface->public.gamma_correction != NULL)
1272                                 dc_gamma_release(&surface->public.
1273                                                 gamma_correction);
1274
1275                         dc_gamma_retain(updates[i].gamma);
1276                         surface->public.gamma_correction =
1277                                                 updates[i].gamma;
1278                 }
1279
1280                 if (updates[i].in_transfer_func &&
1281                         updates[i].in_transfer_func != surface->public.in_transfer_func) {
1282                         if (surface->public.in_transfer_func != NULL)
1283                                 dc_transfer_func_release(
1284                                                 surface->public.
1285                                                 in_transfer_func);
1286
1287                         dc_transfer_func_retain(
1288                                         updates[i].in_transfer_func);
1289                         surface->public.in_transfer_func =
1290                                         updates[i].in_transfer_func;
1291                 }
1292
1293                 if (updates[i].out_transfer_func &&
1294                         updates[i].out_transfer_func != dc_stream->out_transfer_func) {
1295                         if (dc_stream->out_transfer_func != NULL)
1296                                 dc_transfer_func_release(dc_stream->out_transfer_func);
1297                         dc_transfer_func_retain(updates[i].out_transfer_func);
1298                         stream->public.out_transfer_func = updates[i].out_transfer_func;
1299                 }
1300                 if (updates[i].hdr_static_metadata)
1301                         surface->public.hdr_static_ctx =
1302                                 *(updates[i].hdr_static_metadata);
1303         }
1304
1305         if (update_type == UPDATE_TYPE_FULL &&
1306                         !core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1307                 BREAK_TO_DEBUGGER();
1308                 return;
1309         }
1310
1311         if (!surface_count)  /* reset */
1312                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1313
1314         for (i = 0; i < surface_count; i++) {
1315                 struct core_surface *surface = DC_SURFACE_TO_CORE(updates[i].surface);
1316
1317                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1318                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1319                         if (pipe_ctx->surface != surface)
1320                                 continue;
1321                         /* Lock all MPCCs if blending is enabled for DRR */
1322                         if ((update_type == UPDATE_TYPE_FAST &&
1323                                         (dc_stream->freesync_ctx.enabled == true &&
1324                                                         surface_count != context->res_ctx.pool->pipe_count)) &&
1325                                         !pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1326                                 lock_mask = PIPE_LOCK_CONTROL_MPCC_ADDR;
1327                                 core_dc->hwss.pipe_control_lock(
1328                                                 core_dc,
1329                                                 pipe_ctx,
1330                                                 lock_mask,
1331                                                 true);
1332                         }
1333                 }
1334                 for (j = 0; j < context->res_ctx.pool->pipe_count; j++) {
1335                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1336                         struct pipe_ctx *cur_pipe_ctx;
1337                         bool is_new_pipe_surface = true;
1338
1339                         if (pipe_ctx->surface != surface)
1340                                 continue;
1341                         if (update_type != UPDATE_TYPE_FAST &&
1342                                 !pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1343                                 lock_mask = PIPE_LOCK_CONTROL_GRAPHICS |
1344                                                 PIPE_LOCK_CONTROL_SCL |
1345                                                 PIPE_LOCK_CONTROL_BLENDER |
1346                                                 PIPE_LOCK_CONTROL_MODE;
1347                                 core_dc->hwss.pipe_control_lock(
1348                                                 core_dc,
1349                                                 pipe_ctx,
1350                                                 lock_mask,
1351                                                 true);
1352                         }
1353
1354                         if (update_type == UPDATE_TYPE_FULL) {
1355                                 /* only apply for top pipe */
1356                                 if (!pipe_ctx->top_pipe) {
1357                                         core_dc->hwss.apply_ctx_for_surface(core_dc,
1358                                                          surface, context);
1359                                         context_timing_trace(dc, &context->res_ctx);
1360                                 }
1361                         }
1362
1363                         if (updates[i].flip_addr)
1364                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1365
1366                         if (update_type == UPDATE_TYPE_FAST)
1367                                 continue;
1368
1369                         cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1370                         if (cur_pipe_ctx->surface == pipe_ctx->surface)
1371                                 is_new_pipe_surface = false;
1372
1373                         if (is_new_pipe_surface ||
1374                                         updates[i].in_transfer_func)
1375                                 core_dc->hwss.set_input_transfer_func(
1376                                                 pipe_ctx, pipe_ctx->surface);
1377
1378                         if (is_new_pipe_surface ||
1379                                         updates[i].out_transfer_func)
1380                                 core_dc->hwss.set_output_transfer_func(
1381                                                 pipe_ctx,
1382                                                 pipe_ctx->surface,
1383                                                 pipe_ctx->stream);
1384
1385                         if (updates[i].hdr_static_metadata) {
1386                                 resource_build_info_frame(pipe_ctx);
1387                                 core_dc->hwss.update_info_frame(pipe_ctx);
1388                         }
1389                 }
1390         }
1391
1392         if (update_type == UPDATE_TYPE_FAST && (lock_mask == 0))
1393                 return;
1394
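        /* Release the pipe locks taken above for the updated surfaces,
         * walking the pipes in reverse order.
         */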
1395         for (i = context->res_ctx.pool->pipe_count - 1; i >= 0; i--) {
1396                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1397
1398                 for (j = 0; j < surface_count; j++) {
1399                         if (updates[j].surface == &pipe_ctx->surface->public) {
1400                                 if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1401                                         core_dc->hwss.pipe_control_lock(
1402                                                         core_dc,
1403                                                         pipe_ctx,
1404                                                         lock_mask,
1405                                                         false);
1406                                 }
1407                                 break;
1408                         }
1409                 }
1410         }
1411
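        /* Adopt the new context; the old context's contents are destructed
         * and its memory is recycled as the temporary flip context rather
         * than being freed outright.
         */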
1412         if (core_dc->current_context != context) {
1413                 resource_validate_ctx_destruct(core_dc->current_context);
1414                 core_dc->temp_flip_context = core_dc->current_context;
1415
1416                 core_dc->current_context = context;
1417         }
1418 }
1419
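/*
 * Simple accessors into the DC's current (committed) context and link table.
 * dc_get_stream_at_index() returns NULL for an out-of-range index, while the
 * link accessors below assume a valid link_index and do no bounds checking.
 */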
1420 uint8_t dc_get_current_stream_count(const struct dc *dc)
1421 {
1422         struct core_dc *core_dc = DC_TO_CORE(dc);
1423         return core_dc->current_context->stream_count;
1424 }
1425
1426 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1427 {
1428         struct core_dc *core_dc = DC_TO_CORE(dc);
1429         if (i < core_dc->current_context->stream_count)
1430                 return &(core_dc->current_context->streams[i]->public);
1431         return NULL;
1432 }
1433
1434 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1435 {
1436         struct core_dc *core_dc = DC_TO_CORE(dc);
1437         return &core_dc->links[link_index]->public;
1438 }
1439
1440 const struct graphics_object_id dc_get_link_id_at_index(
1441         struct dc *dc, uint32_t link_index)
1442 {
1443         struct core_dc *core_dc = DC_TO_CORE(dc);
1444         return core_dc->links[link_index]->link_id;
1445 }
1446
1447 const struct ddc_service *dc_get_ddc_at_index(
1448         struct dc *dc, uint32_t link_index)
1449 {
1450         struct core_dc *core_dc = DC_TO_CORE(dc);
1451         return core_dc->links[link_index]->ddc;
1452 }
1453
1454 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1455         struct dc *dc, uint32_t link_index)
1456 {
1457         struct core_dc *core_dc = DC_TO_CORE(dc);
1458         return core_dc->links[link_index]->public.irq_source_hpd;
1459 }
1460
1461 const struct audio **dc_get_audios(struct dc *dc)
1462 {
1463         struct core_dc *core_dc = DC_TO_CORE(dc);
1464         return (const struct audio **)core_dc->res_pool->audios;
1465 }
1466
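/*
 * dc_flip_surface_addrs() - latch new surface addresses (page flip).
 *
 * For each surface in the array, store the new address and flip_immediate
 * flag on the surface, then walk the current context and reprogram the plane
 * address on every pipe that scans out that surface. A display-manager
 * page-flip path might call it roughly like this (illustrative only, the
 * names below are not from this file):
 *
 *	struct dc_flip_addrs flip = { .address = new_address };
 *
 *	dc_flip_surface_addrs(dc, &surface, &flip, 1);
 */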
1467 void dc_flip_surface_addrs(
1468                 struct dc *dc,
1469                 const struct dc_surface *const surfaces[],
1470                 struct dc_flip_addrs flip_addrs[],
1471                 uint32_t count)
1472 {
1473         struct core_dc *core_dc = DC_TO_CORE(dc);
1474         int i, j;
1475
1476         for (i = 0; i < count; i++) {
1477                 struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
1478
1479                 surface->public.address = flip_addrs[i].address;
1480                 surface->public.flip_immediate = flip_addrs[i].flip_immediate;
1481
1482                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1483                         struct pipe_ctx *pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1484
1485                         if (pipe_ctx->surface != surface)
1486                                 continue;
1487
1488                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1489                 }
1490         }
1491 }
1492
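/*
 * Interrupt helpers: translate a raw (src_id, ext_id) pair from the IRQ
 * handler into a dc_irq_source, and enable or acknowledge sources through
 * the resource pool's IRQ service. These are thin wrappers around the
 * dal_irq_service_*() calls.
 */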
1493 enum dc_irq_source dc_interrupt_to_irq_source(
1494                 struct dc *dc,
1495                 uint32_t src_id,
1496                 uint32_t ext_id)
1497 {
1498         struct core_dc *core_dc = DC_TO_CORE(dc);
1499         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1500 }
1501
1502 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1503 {
1504         struct core_dc *core_dc = DC_TO_CORE(dc);
1505         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1506 }
1507
1508 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1509 {
1510         struct core_dc *core_dc = DC_TO_CORE(dc);
1511         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1512 }
1513
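/*
 * dc_set_power_state() - handle ACPI power-state transitions.
 *
 * The video power state is recorded, then on D0 the hardware is
 * (re)initialized via init_hw(). For any other state all streams are
 * released (dc_commit_streams with NULL), the hardware is powered down,
 * and the current context is zeroed so that resume starts clean.
 */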
1514 void dc_set_power_state(
1515         struct dc *dc,
1516         enum dc_acpi_cm_power_state power_state,
1517         enum dc_video_power_state video_power_state)
1518 {
1519         struct core_dc *core_dc = DC_TO_CORE(dc);
1520
1521         core_dc->previous_power_state = core_dc->current_power_state;
1522         core_dc->current_power_state = video_power_state;
1523
1524         switch (power_state) {
1525         case DC_ACPI_CM_POWER_STATE_D0:
1526                 core_dc->hwss.init_hw(core_dc);
1527                 break;
1528         default:
1529                 /* NULL means "reset/release all DC streams" */
1530                 dc_commit_streams(dc, NULL, 0);
1531
1532                 core_dc->hwss.power_down(core_dc);
1533
1534                 /* Zero out the current context so that on resume we start
1535                  * with a clean state and DC HW programming optimizations
1536                  * will not cause any trouble.
1537                  */
1538                 memset(core_dc->current_context, 0,
1539                                 sizeof(*core_dc->current_context));
1540
1541                 core_dc->current_context->res_ctx.pool = core_dc->res_pool;
1542
1543                 break;
1544         }
1545
1546 }
1547
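/* Re-run the per-link resume sequence on every physical link. */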
1548 void dc_resume(const struct dc *dc)
1549 {
1550         struct core_dc *core_dc = DC_TO_CORE(dc);
1551
1552         uint32_t i;
1553
1554         for (i = 0; i < core_dc->link_count; i++)
1555                 core_link_resume(core_dc->links[i]);
1556 }
1557
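/*
 * DPCD access helpers: dc_read_dpcd()/dc_write_dpcd() go through the link's
 * DDC service to the sink's DPCD address space and return true on
 * DDC_RESULT_SUCESSFULL. A hypothetical caller reading the sink count
 * register might look like this (illustrative only):
 *
 *	uint8_t sink_count;
 *
 *	if (dc_read_dpcd(dc, link_index, 0x200, &sink_count, 1))
 *		handle_sink_count(sink_count);
 */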
1558 bool dc_read_dpcd(
1559                 struct dc *dc,
1560                 uint32_t link_index,
1561                 uint32_t address,
1562                 uint8_t *data,
1563                 uint32_t size)
1564 {
1565         struct core_dc *core_dc = DC_TO_CORE(dc);
1566
1567         struct core_link *link = core_dc->links[link_index];
1568         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1569                         link->ddc,
1570                         address,
1571                         data,
1572                         size);
1573         return r == DDC_RESULT_SUCESSFULL;
1574 }
1575
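/*
 * dc_query_ddc_data() - combined I2C-over-DDC write/read transaction on the
 * given link, typically used for EDID and similar sink queries.
 */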
1576 bool dc_query_ddc_data(
1577                 struct dc *dc,
1578                 uint32_t link_index,
1579                 uint32_t address,
1580                 uint8_t *write_buf,
1581                 uint32_t write_size,
1582                 uint8_t *read_buf,
1583                 uint32_t read_size)
1584 {
1585         struct core_dc *core_dc = DC_TO_CORE(dc);
1586
1587         struct core_link *link = core_dc->links[link_index];
1588
1589         bool result = dal_ddc_service_query_ddc_data(
1590                         link->ddc,
1591                         address,
1592                         write_buf,
1593                         write_size,
1594                         read_buf,
1595                         read_size);
1596
1597         return result;
1598 }
1599
1600
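/* Write 'size' bytes to the sink's DPCD starting at 'address'. */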
1601 bool dc_write_dpcd(
1602                 struct dc *dc,
1603                 uint32_t link_index,
1604                 uint32_t address,
1605                 const uint8_t *data,
1606                 uint32_t size)
1607 {
1608         struct core_dc *core_dc = DC_TO_CORE(dc);
1609
1610         struct core_link *link = core_dc->links[link_index];
1611
1612         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1613                         link->ddc,
1614                         address,
1615                         data,
1616                         size);
1617         return r == DDC_RESULT_SUCESSFULL;
1618 }
1619
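/*
 * dc_submit_i2c() - submit a raw i2c_command on the link's DDC pin through
 * the i2caux layer, for sink-side I2C traffic that is not plain DPCD access.
 */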
1620 bool dc_submit_i2c(
1621                 struct dc *dc,
1622                 uint32_t link_index,
1623                 struct i2c_command *cmd)
1624 {
1625         struct core_dc *core_dc = DC_TO_CORE(dc);
1626
1627         struct core_link *link = core_dc->links[link_index];
1628         struct ddc_service *ddc = link->ddc;
1629
1630         return dal_i2caux_submit_i2c_command(
1631                 ddc->ctx->i2caux,
1632                 ddc->ddc_pin,
1633                 cmd);
1634 }
1635
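/*
 * Append a remote (e.g. MST) sink to the link's remote_sinks[] array, taking
 * a reference on it. Fails once MAX_SINKS_PER_LINK entries are in use.
 */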
1636 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1637 {
1638         struct dc_link *dc_link = &core_link->public;
1639
1640         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1641                 BREAK_TO_DEBUGGER();
1642                 return false;
1643         }
1644
1645         dc_sink_retain(sink);
1646
1647         dc_link->remote_sinks[dc_link->sink_count] = sink;
1648         dc_link->sink_count++;
1649
1650         return true;
1651 }
1652
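/*
 * dc_link_add_remote_sink() - create a sink from a raw EDID and attach it to
 * the link as a remote sink.
 *
 * The EDID is copied into the new sink and parsed through
 * dm_helpers_parse_edid_caps(); on any failure the sink is detached and
 * released again and NULL is returned.
 */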
1653 struct dc_sink *dc_link_add_remote_sink(
1654                 const struct dc_link *link,
1655                 const uint8_t *edid,
1656                 int len,
1657                 struct dc_sink_init_data *init_data)
1658 {
1659         struct dc_sink *dc_sink;
1660         enum dc_edid_status edid_status;
1661         struct core_link *core_link = DC_LINK_TO_LINK(link);
1662
1663         if (len > MAX_EDID_BUFFER_SIZE) {
1664                 dm_error("Max EDID buffer size exceeded!\n");
1665                 return NULL;
1666         }
1667
1668         if (!init_data) {
1669                 BREAK_TO_DEBUGGER();
1670                 return NULL;
1671         }
1672
1673         if (!init_data->link) {
1674                 BREAK_TO_DEBUGGER();
1675                 return NULL;
1676         }
1677
1678         dc_sink = dc_sink_create(init_data);
1679
1680         if (!dc_sink)
1681                 return NULL;
1682
1683         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1684         dc_sink->dc_edid.length = len;
1685
1686         if (!link_add_remote_sink_helper(
1687                         core_link,
1688                         dc_sink))
1689                 goto fail_add_sink;
1690
1691         edid_status = dm_helpers_parse_edid_caps(
1692                         core_link->ctx,
1693                         &dc_sink->dc_edid,
1694                         &dc_sink->edid_caps);
1695
1696         if (edid_status != EDID_OK)
1697                 goto fail;
1698
1699         return dc_sink;
1700 fail:
1701         dc_link_remove_remote_sink(link, dc_sink);
1702 fail_add_sink:
1703         dc_sink_release(dc_sink);
1704         return NULL;
1705 }
1706
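/*
 * Set (or clear, when sink is NULL) the local sink of a link and update the
 * connection type accordingly.
 */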
1707 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1708 {
1709         struct core_link *core_link = DC_LINK_TO_LINK(link);
1710         struct dc_link *dc_link = &core_link->public;
1711
1712         dc_link->local_sink = sink;
1713
1714         if (sink == NULL) {
1715                 dc_link->type = dc_connection_none;
1716         } else {
1717                 dc_link->type = dc_connection_single;
1718         }
1719 }
1720
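/*
 * dc_link_remove_remote_sink() - drop a previously added remote sink.
 *
 * Releases the sink's reference and compacts remote_sinks[] so the remaining
 * entries stay contiguous.
 */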
1721 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1722 {
1723         int i;
1724         struct core_link *core_link = DC_LINK_TO_LINK(link);
1725         struct dc_link *dc_link = &core_link->public;
1726
1727         if (!link->sink_count) {
1728                 BREAK_TO_DEBUGGER();
1729                 return;
1730         }
1731
1732         for (i = 0; i < dc_link->sink_count; i++) {
1733                 if (dc_link->remote_sinks[i] == sink) {
1734                         dc_sink_release(sink);
1735                         dc_link->remote_sinks[i] = NULL;
1736
1737                         /* shrink array to remove empty place */
1738                         while (i < dc_link->sink_count - 1) {
1739                                 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
1740                                 i++;
1741                         }
1742
1743                         dc_link->sink_count--;
1744                         return;
1745                 }
1746         }
1747 }
1748