drm/amd/display: break up plane disable and disconnect in set mode
linux-2.6-microblaze.git: drivers/gpu/drm/amd/display/dc/core/dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (NULL != dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
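/*
 * create_links() - build the dc_link array for this ASIC.
 *
 * One link is created per physical connector reported by the VBIOS object
 * table, followed by num_virtual_links links that are backed by a virtual
 * link encoder instead of a physical connector.  dc->link_count is only
 * advanced for links that were successfully created.
 */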
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct dc_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 /* next BIOS object table connector */
101                 link_init_params.connector_index = i;
102                 link_init_params.link_index = dc->link_count;
103                 link_init_params.dc = dc;
104                 link = link_create(&link_init_params);
105
106                 if (link) {
107                         dc->links[dc->link_count] = link;
108                         link->dc = dc;
109                         ++dc->link_count;
110                 }
111         }
112
113         for (i = 0; i < num_virtual_links; i++) {
114                 struct dc_link *link = dm_alloc(sizeof(*link));
115                 struct encoder_init_data enc_init = {0};
116
117                 if (link == NULL) {
118                         BREAK_TO_DEBUGGER();
119                         goto failed_alloc;
120                 }
121
122                 link->ctx = dc->ctx;
123                 link->dc = dc;
124                 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
125                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
126                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
127                 link->link_id.enum_id = ENUM_ID_1;
128                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
129
130                 enc_init.ctx = dc->ctx;
131                 enc_init.channel = CHANNEL_ID_UNKNOWN;
132                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
133                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
134                 enc_init.connector = link->link_id;
135                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
136                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
137                 enc_init.encoder.enum_id = ENUM_ID_1;
138                 virtual_link_encoder_construct(link->link_enc, &enc_init);
139
140                 link->link_index = dc->link_count;
141                 dc->links[dc->link_count] = link;
142                 dc->link_count++;
143         }
144
145         return true;
146
147 failed_alloc:
148         return false;
149 }
150
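/*
 * stream_adjust_vmin_vmax() - program DRR (variable V_TOTAL) limits.
 *
 * Finds the pipes in the current context that drive stream[0], applies the
 * new vmin/vmax through the HW sequencer and rebuilds/updates the info
 * frame.  Only a single stream is handled for now (see the TODO below).
 */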
151 static bool stream_adjust_vmin_vmax(struct dc *dc,
152                 const struct dc_stream **stream, int num_streams,
153                 int vmin, int vmax)
154 {
155         /* TODO: Support multiple streams */
156         struct core_dc *core_dc = DC_TO_CORE(dc);
157         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
158         int i = 0;
159         bool ret = false;
160
161         for (i = 0; i < MAX_PIPES; i++) {
162                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
163
164                 if (pipe->stream == core_stream && pipe->stream_enc) {
165                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
166
167                         /* build and update the info frame */
168                         resource_build_info_frame(pipe);
169                         core_dc->hwss.update_info_frame(pipe);
170
171                         ret = true;
172                 }
173         }
174         return ret;
175 }
176
177 static bool stream_get_crtc_position(struct dc *dc,
178                 const struct dc_stream **stream, int num_streams,
179                 unsigned int *v_pos, unsigned int *nom_v_pos)
180 {
181         /* TODO: Support multiple streams */
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
184         int i = 0;
185         bool ret = false;
186         struct crtc_position position;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 struct pipe_ctx *pipe =
190                                 &core_dc->current_context->res_ctx.pipe_ctx[i];
191
192                 if (pipe->stream == core_stream && pipe->stream_enc) {
193                         core_dc->hwss.get_position(&pipe, 1, &position);
194
195                         *v_pos = position.vertical_count;
196                         *nom_v_pos = position.nominal_vcount;
197                         ret = true;
198                 }
199         }
200         return ret;
201 }
202
203 static bool set_gamut_remap(struct dc *dc, const struct dc_stream *stream)
204 {
205         struct core_dc *core_dc = DC_TO_CORE(dc);
206         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
207         int i = 0;
208         bool ret = false;
209         struct pipe_ctx *pipes;
210
211         for (i = 0; i < MAX_PIPES; i++) {
212                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
213                                 == core_stream) {
214
215                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
216                         core_dc->hwss.program_gamut_remap(pipes);
217                         ret = true;
218                 }
219         }
220
221         return ret;
222 }
223
224 static bool program_csc_matrix(struct dc *dc, const struct dc_stream *stream)
225 {
226         struct core_dc *core_dc = DC_TO_CORE(dc);
227         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
228         int i = 0;
229         bool ret = false;
230         struct pipe_ctx *pipes;
231
232         for (i = 0; i < MAX_PIPES; i++) {
233                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
234                                 == core_stream) {
235
236                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
237                         core_dc->hwss.program_csc_matrix(pipes,
238                         core_stream->public.output_color_space,
239                         core_stream->public.csc_color_matrix.matrix);
240                         ret = true;
241                 }
242         }
243
244         return ret;
245 }
246
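/*
 * set_static_screen_events() - configure static-screen event triggers.
 *
 * Collects every pipe in the current context that drives one of the given
 * streams and programs the static screen control for all of them with a
 * single HW sequencer call.
 */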
247 static void set_static_screen_events(struct dc *dc,
248                 const struct dc_stream **stream,
249                 int num_streams,
250                 const struct dc_static_screen_events *events)
251 {
252         struct core_dc *core_dc = DC_TO_CORE(dc);
253         int i = 0;
254         int j = 0;
255         struct pipe_ctx *pipes_affected[MAX_PIPES];
256         int num_pipes_affected = 0;
257
258         for (i = 0; i < num_streams; i++) {
259                 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[i]);
260
261                 for (j = 0; j < MAX_PIPES; j++) {
262                         if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
263                                         == core_stream) {
264                                 pipes_affected[num_pipes_affected++] =
265                                                 &core_dc->current_context->res_ctx.pipe_ctx[j];
266                         }
267                 }
268         }
269
270         core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
271 }
272
273 static void set_drive_settings(struct dc *dc,
274                 struct link_training_settings *lt_settings,
275                 const struct dc_link *link)
276 {
277         struct core_dc *core_dc = DC_TO_CORE(dc);
278         int i;
279
280         for (i = 0; i < core_dc->link_count; i++) {
281                 if (core_dc->links[i] == link)
282                         break;
283         }
284
285         if (i >= core_dc->link_count) {
286                 ASSERT_CRITICAL(false);
287                 return;
288         }
288         dc_link_dp_set_drive_settings(core_dc->links[i], lt_settings);
289 }
290
291 static void perform_link_training(struct dc *dc,
292                 struct dc_link_settings *link_setting,
293                 bool skip_video_pattern)
294 {
295         struct core_dc *core_dc = DC_TO_CORE(dc);
296         int i;
297
298         for (i = 0; i < core_dc->link_count; i++)
299                 dc_link_dp_perform_link_training(
300                         core_dc->links[i],
301                         link_setting,
302                         skip_video_pattern);
303 }
304
305 static void set_preferred_link_settings(struct dc *dc,
306                 struct dc_link_settings *link_setting,
307                 struct dc_link *link)
308 {
309         link->preferred_link_setting = *link_setting;
310         dp_retrain_link_dp_test(link, link_setting, false);
311 }
312
313 static void enable_hpd(const struct dc_link *link)
314 {
315         dc_link_dp_enable_hpd(link);
316 }
317
318 static void disable_hpd(const struct dc_link *link)
319 {
320         dc_link_dp_disable_hpd(link);
321 }
322
323
324 static void set_test_pattern(
325                 struct dc_link *link,
326                 enum dp_test_pattern test_pattern,
327                 const struct link_training_settings *p_link_settings,
328                 const unsigned char *p_custom_pattern,
329                 unsigned int cust_pattern_size)
330 {
331         if (link != NULL)
332                 dc_link_dp_set_test_pattern(
333                         link,
334                         test_pattern,
335                         p_link_settings,
336                         p_custom_pattern,
337                         cust_pattern_size);
338 }
339
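/*
 * set_dither_option() - select the bit-depth reduction / dither mode.
 *
 * DITHER_OPTION_DEFAULT picks spatial dithering based on the stream's
 * colour depth; any other valid option is applied as-is.  Note that the
 * resulting bit-depth reduction parameters are programmed on the OPP of
 * the first pipe of the link's current context.
 */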
340 void set_dither_option(const struct dc_stream *dc_stream,
341                 enum dc_dither_option option)
342 {
343         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
344         struct bit_depth_reduction_params params;
345         struct dc_link *link;
346         struct pipe_ctx *pipes;
347
348         if (!stream || option > DITHER_OPTION_MAX)
349                 return;
350         link = stream->status.link;
351         pipes = link->dc->current_context->res_ctx.pipe_ctx;
352         memset(&params, 0, sizeof(params));
353         if (option == DITHER_OPTION_DEFAULT) {
354                 switch (stream->public.timing.display_color_depth) {
355                 case COLOR_DEPTH_666:
356                         stream->public.dither_option = DITHER_OPTION_SPATIAL6;
357                         break;
358                 case COLOR_DEPTH_888:
359                         stream->public.dither_option = DITHER_OPTION_SPATIAL8;
360                         break;
361                 case COLOR_DEPTH_101010:
362                         stream->public.dither_option = DITHER_OPTION_SPATIAL10;
363                         break;
364                 default:
365                         option = DITHER_OPTION_DISABLE;
366                 }
367         } else {
368                 stream->public.dither_option = option;
369         }
370         resource_build_bit_depth_reduction_params(stream,
371                                 &params);
372         stream->bit_depth_params = params;
373         pipes->opp->funcs->
374                 opp_program_bit_depth_reduction(pipes->opp, &params);
375 }
376
377 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
378 {
379         if (core_dc->hwss.set_drr != NULL) {
380                 core_dc->public.stream_funcs.adjust_vmin_vmax =
381                                 stream_adjust_vmin_vmax;
382         }
383
384         core_dc->public.stream_funcs.set_static_screen_events =
385                         set_static_screen_events;
386
387         core_dc->public.stream_funcs.get_crtc_position =
388                         stream_get_crtc_position;
389
390         core_dc->public.stream_funcs.set_gamut_remap =
391                         set_gamut_remap;
392
393         core_dc->public.stream_funcs.program_csc_matrix =
394                         program_csc_matrix;
395
396         core_dc->public.stream_funcs.set_dither_option =
397                         set_dither_option;
398
399         core_dc->public.link_funcs.set_drive_settings =
400                         set_drive_settings;
401
402         core_dc->public.link_funcs.perform_link_training =
403                         perform_link_training;
404
405         core_dc->public.link_funcs.set_preferred_link_settings =
406                         set_preferred_link_settings;
407
408         core_dc->public.link_funcs.enable_hpd =
409                         enable_hpd;
410
411         core_dc->public.link_funcs.disable_hpd =
412                         disable_hpd;
413
414         core_dc->public.link_funcs.set_test_pattern =
415                         set_test_pattern;
416 }
417
418 static void destruct(struct core_dc *dc)
419 {
420         dc_release_validate_context(dc->current_context);
421         dc->current_context = NULL;
422
423         destroy_links(dc);
424
425         dc_destroy_resource_pool(dc);
426
427         if (dc->ctx->gpio_service)
428                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
429
430         if (dc->ctx->i2caux)
431                 dal_i2caux_destroy(&dc->ctx->i2caux);
432
433         if (dc->ctx->created_bios)
434                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
435
436         if (dc->ctx->logger)
437                 dal_logger_destroy(&dc->ctx->logger);
438
439         dm_free(dc->ctx);
440         dc->ctx = NULL;
441 }
442
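/*
 * construct() - one-time software construction of the display core.
 *
 * Creates, in order: the dc_context, an empty current validate_context
 * (holding one reference), the logger, the BIOS parser (unless a VBIOS
 * override is supplied), the I2C/AUX engine, the GPIO service, the
 * ASIC-specific resource pool and finally the links.  On any failure
 * destruct() unwinds whatever has been created so far.
 */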
443 static bool construct(struct core_dc *dc,
444                 const struct dc_init_data *init_params)
445 {
446         struct dal_logger *logger;
447         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
448         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
449
450         if (!dc_ctx) {
451                 dm_error("%s: failed to create ctx\n", __func__);
452                 goto ctx_fail;
453         }
454
455         dc->current_context = dm_alloc(sizeof(*dc->current_context));
456
457         if (!dc->current_context) {
458                 dm_error("%s: failed to create validate ctx\n", __func__);
459                 goto val_ctx_fail;
460         }
461
462         dc->current_context->ref_count++;
463
464         dc_ctx->cgs_device = init_params->cgs_device;
465         dc_ctx->driver_context = init_params->driver;
466         dc_ctx->dc = &dc->public;
467         dc_ctx->asic_id = init_params->asic_id;
468
469         /* Create logger */
470         logger = dal_logger_create(dc_ctx);
471
472         if (!logger) {
473                 /* cannot use the logger yet; report through the base driver */
474                 dm_error("%s: failed to create Logger!\n", __func__);
475                 goto logger_fail;
476         }
477         dc_ctx->logger = logger;
478         dc->ctx = dc_ctx;
479         dc->ctx->dce_environment = init_params->dce_environment;
480
481         dc_version = resource_parse_asic_id(init_params->asic_id);
482         dc->ctx->dce_version = dc_version;
483
484         /* Resource should construct all asic specific resources.
485          * This should be the only place where we need to parse the asic id
486          */
487         if (init_params->vbios_override)
488                 dc_ctx->dc_bios = init_params->vbios_override;
489         else {
490                 /* Create BIOS parser */
491                 struct bp_init_data bp_init_data;
492
493                 bp_init_data.ctx = dc_ctx;
494                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
495
496                 dc_ctx->dc_bios = dal_bios_parser_create(
497                                 &bp_init_data, dc_version);
498
499                 if (!dc_ctx->dc_bios) {
500                         ASSERT_CRITICAL(false);
501                         goto bios_fail;
502                 }
503
504                 dc_ctx->created_bios = true;
505         }
506
507         /* Create I2C AUX */
508         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
509
510         if (!dc_ctx->i2caux) {
511                 ASSERT_CRITICAL(false);
512                 goto failed_to_create_i2caux;
513         }
514
515         /* Create GPIO service */
516         dc_ctx->gpio_service = dal_gpio_service_create(
517                         dc_version,
518                         dc_ctx->dce_environment,
519                         dc_ctx);
520
521         if (!dc_ctx->gpio_service) {
522                 ASSERT_CRITICAL(false);
523                 goto gpio_fail;
524         }
525
526         dc->res_pool = dc_create_resource_pool(
527                         dc,
528                         init_params->num_virtual_links,
529                         dc_version,
530                         init_params->asic_id);
531         if (!dc->res_pool)
532                 goto create_resource_fail;
533
534         if (!create_links(dc, init_params->num_virtual_links))
535                 goto create_links_fail;
536
537         allocate_dc_stream_funcs(dc);
538
539         return true;
540
541         /**** error handling here ****/
542 create_links_fail:
543 create_resource_fail:
544 gpio_fail:
545 failed_to_create_i2caux:
546 bios_fail:
547 logger_fail:
548 val_ctx_fail:
549 ctx_fail:
550         destruct(dc);
551         return false;
552 }
553
554 /*
555 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
556 {
557         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
558         unsigned int pixDurationInPico = round(pixel_duration);
559
560         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
561
562         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
563         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
564         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
565
566         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
567         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
568         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
569
570         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
571         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
572
573         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
574         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
575 }
576 */
577
578 /*******************************************************************************
579  * Public functions
580  ******************************************************************************/
581
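/*
 * dc_create() - allocate and bring up the display core.
 *
 * Runs construct() followed by hwss.init_hw() (HW and SW init are not yet
 * separated, see the TODO below).  Capability limits are derived from the
 * resource pool: max_streams is the smaller of the usable pipe count
 * (underlay pipe excluded) and the stream encoder count.  Returns NULL on
 * allocation or construction failure.
 */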
582 struct dc *dc_create(const struct dc_init_data *init_params)
583 {
584         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
585         unsigned int full_pipe_count;
586
587         if (NULL == core_dc)
588                 goto alloc_fail;
589
590         if (false == construct(core_dc, init_params))
591                 goto construct_fail;
592
593         /*TODO: separate HW and SW initialization*/
594         core_dc->hwss.init_hw(core_dc);
595
596         full_pipe_count = core_dc->res_pool->pipe_count;
597         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
598                 full_pipe_count--;
599         core_dc->public.caps.max_streams = min(
600                         full_pipe_count,
601                         core_dc->res_pool->stream_enc_count);
602
603         core_dc->public.caps.max_links = core_dc->link_count;
604         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
605
606         core_dc->public.config = init_params->flags;
607
608         dm_logger_write(core_dc->ctx->logger, LOG_DC,
609                         "Display Core initialized\n");
610
611
612         /* TODO: missing feature to be enabled */
613         core_dc->public.debug.disable_dfs_bypass = true;
614
615         return &core_dc->public;
616
617 construct_fail:
618         dm_free(core_dc);
619
620 alloc_fail:
621         return NULL;
622 }
623
624 void dc_destroy(struct dc **dc)
625 {
626         struct core_dc *core_dc = DC_TO_CORE(*dc);
627         destruct(core_dc);
628         dm_free(core_dc);
629         *dc = NULL;
630 }
631
632 static bool is_validation_required(
633                 const struct core_dc *dc,
634                 const struct dc_validation_set set[],
635                 int set_count)
636 {
637         const struct validate_context *context = dc->current_context;
638         int i, j;
639
640         if (context->stream_count != set_count)
641                 return true;
642
643         for (i = 0; i < set_count; i++) {
644
645                 if (set[i].surface_count != context->stream_status[i].surface_count)
646                         return true;
647                 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
648                         return true;
649
650                 for (j = 0; j < set[i].surface_count; j++) {
651                         struct dc_surface temp_surf;
652                         memset(&temp_surf, 0, sizeof(temp_surf));
653
654                         temp_surf = *context->stream_status[i].surfaces[j];
655                         temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
656                         temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
657                         temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
658
659                         if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
660                                 return true;
661                 }
662         }
663
664         return false;
665 }
666
667 struct validate_context *dc_get_validate_context(
668                 const struct dc *dc,
669                 const struct dc_validation_set set[],
670                 uint8_t set_count)
671 {
672         struct core_dc *core_dc = DC_TO_CORE(dc);
673         enum dc_status result = DC_ERROR_UNEXPECTED;
674         struct validate_context *context;
675
676         context = dm_alloc(sizeof(struct validate_context));
677         if (context == NULL)
678                 goto context_alloc_fail;
679
680         ++context->ref_count;
681
682         if (!is_validation_required(core_dc, set, set_count)) {
683                 dc_resource_validate_ctx_copy_construct(core_dc->current_context, context);
684                 return context;
685         }
686
687         result = core_dc->res_pool->funcs->validate_with_context(
688                         core_dc, set, set_count, context, core_dc->current_context);
689
690 context_alloc_fail:
691         if (result != DC_OK) {
692                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
693                                 "%s:resource validation failed, dc_status:%d\n",
694                                 __func__,
695                                 result);
696                 if (context) {
697                         dc_release_validate_context(context);
698                         context = NULL;
699                 }
700         }
701         return context;
702
703 }
704
705 bool dc_validate_resources(
706                 const struct dc *dc,
707                 const struct dc_validation_set set[],
708                 uint8_t set_count)
709 {
710         struct core_dc *core_dc = DC_TO_CORE(dc);
711         enum dc_status result = DC_ERROR_UNEXPECTED;
712         struct validate_context *context;
713
714         context = dm_alloc(sizeof(struct validate_context));
715         if (context == NULL)
716                 goto context_alloc_fail;
717
718         ++context->ref_count;
719
720         result = core_dc->res_pool->funcs->validate_with_context(
721                                 core_dc, set, set_count, context, NULL);
722
723 context_alloc_fail:
724         if (result != DC_OK) {
725                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
726                                 "%s:resource validation failed, dc_status:%d\n",
727                                 __func__,
728                                 result);
729         }
730
731         if (context)
732                 dc_release_validate_context(context);
733
734         return result == DC_OK;
735 }
736
737 bool dc_validate_guaranteed(
738                 const struct dc *dc,
739                 const struct dc_stream *stream)
740 {
741         struct core_dc *core_dc = DC_TO_CORE(dc);
742         enum dc_status result = DC_ERROR_UNEXPECTED;
743         struct validate_context *context;
744
745         context = dm_alloc(sizeof(struct validate_context));
746         if (context == NULL)
747                 goto context_alloc_fail;
748
749         ++context->ref_count;
750
751         result = core_dc->res_pool->funcs->validate_guaranteed(
752                                         core_dc, stream, context);
753
754         dc_release_validate_context(context);
755
756 context_alloc_fail:
757         if (result != DC_OK) {
758                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
759                         "%s:guaranteed validation failed, dc_status:%d\n",
760                         __func__,
761                         result);
762         }
763
764         return (result == DC_OK);
765 }
766
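/*
 * program_timing_sync() - group pipes whose stream timings can be synced.
 *
 * Top-level pipes with a stream are partitioned into groups of
 * synchronizable timings.  Within each group the first unblanked pipe is
 * moved to index 0 to act as master, any other already unblanked pipes
 * are dropped from the group, and every group that still contains more
 * than one pipe is handed to the HW sequencer for timing synchronization.
 */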
767 static void program_timing_sync(
768                 struct core_dc *core_dc,
769                 struct validate_context *ctx)
770 {
771         int i, j;
772         int group_index = 0;
773         int pipe_count = core_dc->res_pool->pipe_count;
774         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
775
776         for (i = 0; i < pipe_count; i++) {
777                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
778                         continue;
779
780                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
781         }
782
783         for (i = 0; i < pipe_count; i++) {
784                 int group_size = 1;
785                 struct pipe_ctx *pipe_set[MAX_PIPES];
786
787                 if (!unsynced_pipes[i])
788                         continue;
789
790                 pipe_set[0] = unsynced_pipes[i];
791                 unsynced_pipes[i] = NULL;
792
793                 /* Add this tg to the set, then search the remaining tgs for
794                  * ones with the same timing and add them to the group as well
795                  */
796                 for (j = i + 1; j < pipe_count; j++) {
797                         if (!unsynced_pipes[j])
798                                 continue;
799
800                         if (resource_are_streams_timing_synchronizable(
801                                         unsynced_pipes[j]->stream,
802                                         pipe_set[0]->stream)) {
803                                 pipe_set[group_size] = unsynced_pipes[j];
804                                 unsynced_pipes[j] = NULL;
805                                 group_size++;
806                         }
807                 }
808
809                 /* set first unblanked pipe as master */
810                 for (j = 0; j < group_size; j++) {
811                         struct pipe_ctx *temp;
812
813                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
814                                 if (j == 0)
815                                         break;
816
817                                 temp = pipe_set[0];
818                                 pipe_set[0] = pipe_set[j];
819                                 pipe_set[j] = temp;
820                                 break;
821                         }
822                 }
823
824                 /* remove any other unblanked pipes as they have already been synced */
825                 for (j = j + 1; j < group_size; j++) {
826                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
827                                 group_size--;
828                                 pipe_set[j] = pipe_set[group_size];
829                                 j--;
830                         }
831                 }
832
833                 if (group_size > 1) {
834                         core_dc->hwss.enable_timing_synchronization(
835                                 core_dc, group_index, group_size, pipe_set);
836                         group_index++;
837                 }
838         }
839 }
840
841 static bool context_changed(
842                 struct core_dc *dc,
843                 struct validate_context *context)
844 {
845         uint8_t i;
846
847         if (context->stream_count != dc->current_context->stream_count)
848                 return true;
849
850         for (i = 0; i < dc->current_context->stream_count; i++) {
851                 if (&dc->current_context->streams[i]->public != &context->streams[i]->public)
852                         return true;
853         }
854
855         return false;
856 }
857
858 static bool streams_changed(
859                 struct core_dc *dc,
860                 const struct dc_stream *streams[],
861                 uint8_t stream_count)
862 {
863         uint8_t i;
864
865         if (stream_count != dc->current_context->stream_count)
866                 return true;
867
868         for (i = 0; i < dc->current_context->stream_count; i++) {
869                 if (&dc->current_context->streams[i]->public != streams[i])
870                         return true;
871         }
872
873         return false;
874 }
875
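/*
 * dc_enable_stereo() - (re)program stereo setup for the given streams.
 *
 * Walks the pipes of the supplied context (or the current context when
 * none is given) and calls hwss.setup_stereo() for every pipe that drives
 * one of the listed streams.  When FBC support is compiled in and the
 * compressor is currently enabled in HW, it is disabled here as well.
 */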
876 bool dc_enable_stereo(
877         struct dc *dc,
878         struct validate_context *context,
879         const struct dc_stream *streams[],
880         uint8_t stream_count)
881 {
882         bool ret = true;
883         int i, j;
884         struct pipe_ctx *pipe;
885         struct core_dc *core_dc = DC_TO_CORE(dc);
886
887 #ifdef ENABLE_FBC
888         struct compressor *fbc_compressor = core_dc->fbc_compressor;
889 #endif
890
891         for (i = 0; i < MAX_PIPES; i++) {
892                 if (context != NULL)
893                         pipe = &context->res_ctx.pipe_ctx[i];
894                 else
895                         pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
896                 for (j = 0; pipe && j < stream_count; j++) {
897                         if (streams[j] && streams[j] == &pipe->stream->public &&
898                                 core_dc->hwss.setup_stereo)
899                                 core_dc->hwss.setup_stereo(pipe, core_dc);
900                 }
901         }
902
903 #ifdef ENABLE_FBC
904         if (fbc_compressor != NULL &&
905             fbc_compressor->funcs->is_fbc_enabled_in_hw(core_dc->fbc_compressor,
906                                                         &pipe->tg->inst))
907                 fbc_compressor->funcs->disable_fbc(fbc_compressor);
908
909 #endif
910         return ret;
911 }
912
913
914 /*
915  * Applies the given context to the HW and copies it into the current context.
916  * It is up to the caller to release the src context afterwards.
917  */
918 static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *context)
919 {
920         struct core_dc *core_dc = DC_TO_CORE(dc);
921         struct dc_bios *dcb = core_dc->ctx->dc_bios;
922         enum dc_status result = DC_ERROR_UNEXPECTED;
923         struct pipe_ctx *pipe;
924         int i, j, k, l;
925         const struct dc_stream *dc_streams[MAX_STREAMS] = {0};
926
927         for (i = 0; i < context->stream_count; i++)
928                 dc_streams[i] =  &context->streams[i]->public;
929
930         if (!dcb->funcs->is_accelerated_mode(dcb))
931                 core_dc->hwss.enable_accelerated_mode(core_dc);
932
933         for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
934                 pipe = &context->res_ctx.pipe_ctx[i];
935                 core_dc->hwss.wait_for_mpcc_disconnect(core_dc, core_dc->res_pool, pipe);
936         }
937         result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
938
939         program_timing_sync(core_dc, context);
940
941         for (i = 0; i < context->stream_count; i++) {
942                 const struct dc_sink *sink = context->streams[i]->sink;
943
944                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
945                         const struct dc_surface *surface =
946                                         context->stream_status[i].surfaces[j];
947
948                         core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
949
950                         /*
951                          * enable stereo
952                          * TODO rework dc_enable_stereo call to work with validation sets?
953                          */
954                         for (k = 0; k < MAX_PIPES; k++) {
955                                 pipe = &context->res_ctx.pipe_ctx[k];
956
957                 for (l = 0; pipe && l < context->stream_count; l++) {
958                                         if (context->streams[l] &&
959                                             context->streams[l] == pipe->stream &&
960                                             core_dc->hwss.setup_stereo)
961                                                 core_dc->hwss.setup_stereo(pipe, core_dc);
962                                 }
963                         }
964                 }
965
966                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
967                                 context->streams[i]->public.timing.h_addressable,
968                                 context->streams[i]->public.timing.v_addressable,
969                                 context->streams[i]->public.timing.h_total,
970                                 context->streams[i]->public.timing.v_total,
971                                 context->streams[i]->public.timing.pix_clk_khz);
972         }
973
974         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
975
976         dc_release_validate_context(core_dc->current_context);
977
978         core_dc->current_context = context;
979
980         dc_retain_validate_context(core_dc->current_context);
981
982         return (result == DC_OK);
983 }
984
985 bool dc_commit_context(struct dc *dc, struct validate_context *context)
986 {
987         enum dc_status result = DC_ERROR_UNEXPECTED;
988         struct core_dc *core_dc = DC_TO_CORE(dc);
989         int i;
990
991         if (!context_changed(core_dc, context))
992                 return true;
993
994         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
995                                 __func__, context->stream_count);
996
997         for (i = 0; i < context->stream_count; i++) {
998                 const struct dc_stream *stream = &context->streams[i]->public;
999
1000                 dc_stream_log(stream,
1001                                 core_dc->ctx->logger,
1002                                 LOG_DC);
1003         }
1004
1005         result = dc_commit_context_no_check(dc, context);
1006
1007         return (result == DC_OK);
1008 }
1009
1010
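/*
 * dc_commit_streams() - validate and apply a new set of streams.
 *
 * Builds a dc_validation_set from the streams and their currently
 * committed surfaces, validates it against the current context and, on
 * success, commits the resulting context to HW.  Returns early when the
 * stream set is unchanged.
 */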
1011 bool dc_commit_streams(
1012         struct dc *dc,
1013         const struct dc_stream *streams[],
1014         uint8_t stream_count)
1015 {
1016         struct core_dc *core_dc = DC_TO_CORE(dc);
1017         enum dc_status result = DC_ERROR_UNEXPECTED;
1018         struct validate_context *context;
1019         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
1020         int i;
1021
1022         if (!streams_changed(core_dc, streams, stream_count))
1023                 return true;
1024
1025         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
1026                                 __func__, stream_count);
1027
1028         for (i = 0; i < stream_count; i++) {
1029                 const struct dc_stream *stream = streams[i];
1030                 const struct dc_stream_status *status = dc_stream_get_status(stream);
1031                 int j;
1032
1033                 dc_stream_log(stream,
1034                                 core_dc->ctx->logger,
1035                                 LOG_DC);
1036
1037                 set[i].stream = stream;
1038
1039                 if (status) {
1040                         set[i].surface_count = status->surface_count;
1041                         for (j = 0; j < status->surface_count; j++)
1042                                 set[i].surfaces[j] = status->surfaces[j];
1043                 }
1044
1045         }
1046
1047         context = dm_alloc(sizeof(struct validate_context));
1048         if (context == NULL)
1049                 goto context_alloc_fail;
1050
1051         ++context->ref_count;
1052
1053         result = core_dc->res_pool->funcs->validate_with_context(
1054                         core_dc, set, stream_count, context, core_dc->current_context);
1055         if (result != DC_OK) {
1056                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
1057                                         "%s: Context validation failed! dc_status:%d\n",
1058                                         __func__,
1059                                         result);
1060                 BREAK_TO_DEBUGGER();
1061                 goto fail;
1062         }
1063
1064         result = dc_commit_context_no_check(dc, context);
1065
1066 fail:
1067         dc_release_validate_context(context);
1068
1069 context_alloc_fail:
1070         return (result == DC_OK);
1071 }
1072
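/*
 * dc_post_update_surfaces_to_stream() - clean up after surface updates.
 *
 * Powers down the front end of every pipe that no longer has a stream or
 * surface attached and reprograms bandwidth/clocks for the current
 * context.
 */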
1073 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1074 {
1075         int i;
1076         struct core_dc *core_dc = DC_TO_CORE(dc);
1077         struct validate_context *context = core_dc->current_context;
1078
1079         post_surface_trace(dc);
1080
1081         for (i = 0; i < core_dc->res_pool->pipe_count; i++)
1082                 if (context->res_ctx.pipe_ctx[i].stream == NULL
1083                                 || context->res_ctx.pipe_ctx[i].surface == NULL)
1084                         core_dc->hwss.power_down_front_end(core_dc, i);
1085
1086         /* 3rd param should be true; temporary workaround for RV (DCN 1.0) */
1087 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1088         core_dc->hwss.set_bandwidth(core_dc, context, core_dc->ctx->dce_version != DCN_VERSION_1_0);
1089 #else
1090         core_dc->hwss.set_bandwidth(core_dc, context, true);
1091 #endif
1092         return true;
1093 }
1094
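/*
 * dc_commit_surfaces_to_stream() - convenience wrapper around
 * dc_update_surfaces_and_stream().
 *
 * Translates an array of dc_surface objects into full surface updates
 * (flip address, plane info and scaling info), mirrors the stream's
 * src/dst rects and output transfer function into a stream update,
 * applies everything and then runs dc_post_update_surfaces_to_stream().
 *
 * Illustrative call from a display manager (a sketch only; the surface
 * and stream objects used here are hypothetical):
 *
 *	struct dc_surface *surfaces[1] = { new_surface };
 *
 *	if (!dc_commit_surfaces_to_stream(dc, surfaces, 1, stream))
 *		DRM_ERROR("failed to commit surfaces to stream\n");
 */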
1095 bool dc_commit_surfaces_to_stream(
1096                 struct dc *dc,
1097                 struct dc_surface **new_surfaces,
1098                 uint8_t new_surface_count,
1099                 const struct dc_stream *dc_stream)
1100 {
1101         struct dc_surface_update updates[MAX_SURFACES];
1102         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1103         struct dc_plane_info plane_info[MAX_SURFACES];
1104         struct dc_scaling_info scaling_info[MAX_SURFACES];
1105         int i;
1106         struct dc_stream_update *stream_update =
1107                         dm_alloc(sizeof(struct dc_stream_update));
1108
1109         if (!stream_update) {
1110                 BREAK_TO_DEBUGGER();
1111                 return false;
1112         }
1113
1114         memset(updates, 0, sizeof(updates));
1115         memset(flip_addr, 0, sizeof(flip_addr));
1116         memset(plane_info, 0, sizeof(plane_info));
1117         memset(scaling_info, 0, sizeof(scaling_info));
1118
1119         stream_update->src = dc_stream->src;
1120         stream_update->dst = dc_stream->dst;
1121         stream_update->out_transfer_func = dc_stream->out_transfer_func;
1122
1123         for (i = 0; i < new_surface_count; i++) {
1124                 updates[i].surface = new_surfaces[i];
1125                 updates[i].gamma =
1126                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1127                 updates[i].in_transfer_func = new_surfaces[i]->in_transfer_func;
1128                 flip_addr[i].address = new_surfaces[i]->address;
1129                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1130                 plane_info[i].color_space = new_surfaces[i]->color_space;
1131                 plane_info[i].format = new_surfaces[i]->format;
1132                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1133                 plane_info[i].rotation = new_surfaces[i]->rotation;
1134                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1135                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1136                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1137                 plane_info[i].visible = new_surfaces[i]->visible;
1138                 plane_info[i].per_pixel_alpha = new_surfaces[i]->per_pixel_alpha;
1139                 plane_info[i].dcc = new_surfaces[i]->dcc;
1140                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1141                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1142                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1143                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1144
1145                 updates[i].flip_addr = &flip_addr[i];
1146                 updates[i].plane_info = &plane_info[i];
1147                 updates[i].scaling_info = &scaling_info[i];
1148         }
1149
1150         dc_update_surfaces_and_stream(
1151                         dc,
1152                         updates,
1153                         new_surface_count,
1154                         dc_stream, stream_update);
1155
1156         dc_post_update_surfaces_to_stream(dc);
1157
1158         dm_free(stream_update);
1159         return true;
1160 }
1161
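/*
 * validate_context objects are reference counted: whoever allocates one
 * takes the initial reference, dc_retain_validate_context() and
 * dc_release_validate_context() must be balanced, and the context is
 * destructed and freed when the count drops to zero.
 */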
1162 void dc_retain_validate_context(struct validate_context *context)
1163 {
1164         ASSERT(context->ref_count > 0);
1165         ++context->ref_count;
1166 }
1167
1168 void dc_release_validate_context(struct validate_context *context)
1169 {
1170         ASSERT(context->ref_count > 0);
1171         --context->ref_count;
1172
1173         if (context->ref_count == 0) {
1174                 dc_resource_validate_ctx_destruct(context);
1175                 dm_free(context);
1176         }
1177 }
1178
1179 static bool is_surface_in_context(
1180                 const struct validate_context *context,
1181                 const struct dc_surface *surface)
1182 {
1183         int j;
1184
1185         for (j = 0; j < MAX_PIPES; j++) {
1186                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1187
1188                 if (surface == pipe_ctx->surface) {
1189                         return true;
1190                 }
1191         }
1192
1193         return false;
1194 }
1195
1196 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1197 {
1198         switch (format) {
1199         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1200         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1201                 return 12;
1202         case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1203         case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1204         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1205         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1206                 return 16;
1207         case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1208         case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1209         case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1210         case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1211                 return 32;
1212         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1213         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1214         case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1215                 return 64;
1216         default:
1217                 ASSERT_CRITICAL(false);
1218                 return -1;
1219         }
1220 }
1221
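/*
 * get_plane_info_update_type() - classify a plane_info update.
 *
 * Returns UPDATE_TYPE_FAST when no plane_info is supplied, UPDATE_TYPE_FULL
 * when any of the "full update" fields differs from the current surface or
 * when the new pixel format changes the bits per pixel, and otherwise
 * UPDATE_TYPE_MED.
 */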
1222 static enum surface_update_type get_plane_info_update_type(
1223                 const struct dc_surface_update *u,
1224                 int surface_index)
1225 {
1226         struct dc_plane_info temp_plane_info;
1227         memset(&temp_plane_info, 0, sizeof(temp_plane_info));
1228
1229         if (!u->plane_info)
1230                 return UPDATE_TYPE_FAST;
1231
1232         temp_plane_info = *u->plane_info;
1233
1234         /* Build a temporary plane_info that takes the parameters which
1235          * force a full update from the current surface and the remaining
1236          * parameters from the provided plane configuration.  The memcmp
1237          * below therefore only flags changes to the full-update fields;
1238          * fields that allow fast/medium updates get their own checks.
1239          */
1240
1241         /* Full update parameters */
1242         temp_plane_info.color_space = u->surface->color_space;
1243         temp_plane_info.dcc = u->surface->dcc;
1244         temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1245         temp_plane_info.plane_size = u->surface->plane_size;
1246         temp_plane_info.rotation = u->surface->rotation;
1247         temp_plane_info.stereo_format = u->surface->stereo_format;
1248         temp_plane_info.tiling_info = u->surface->tiling_info;
1249
1250         if (surface_index == 0)
1251                 temp_plane_info.visible = u->plane_info->visible;
1252         else
1253                 temp_plane_info.visible = u->surface->visible;
1254
1255         if (memcmp(u->plane_info, &temp_plane_info,
1256                         sizeof(struct dc_plane_info)) != 0)
1257                 return UPDATE_TYPE_FULL;
1258
1259         if (pixel_format_to_bpp(u->plane_info->format) !=
1260                         pixel_format_to_bpp(u->surface->format)) {
1261                 return UPDATE_TYPE_FULL;
1262         } else {
1263                 return UPDATE_TYPE_MED;
1264         }
1265 }
1266
1267 static enum surface_update_type  get_scaling_info_update_type(
1268                 const struct dc_surface_update *u)
1269 {
1270         if (!u->scaling_info)
1271                 return UPDATE_TYPE_FAST;
1272
1273         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1274                         || u->scaling_info->src_rect.height != u->surface->src_rect.height
1275                         || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1276                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1277                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1278                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
1279                 return UPDATE_TYPE_FULL;
1280
1281         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1282                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
1283                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1284                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1285                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1286                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1287                 return UPDATE_TYPE_MED;
1288
1289         return UPDATE_TYPE_FAST;
1290 }
1291
1292 static enum surface_update_type det_surface_update(
1293                 const struct core_dc *dc,
1294                 const struct dc_surface_update *u,
1295                 int surface_index)
1296 {
1297         const struct validate_context *context = dc->current_context;
1298         enum surface_update_type type = UPDATE_TYPE_FAST;
1299         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1300
1301         if (!is_surface_in_context(context, u->surface))
1302                 return UPDATE_TYPE_FULL;
1303
1304         type = get_plane_info_update_type(u, surface_index);
1305         if (overall_type < type)
1306                 overall_type = type;
1307
1308         type = get_scaling_info_update_type(u);
1309         if (overall_type < type)
1310                 overall_type = type;
1311
1312         if (u->in_transfer_func ||
1313                 u->hdr_static_metadata) {
1314                 if (overall_type < UPDATE_TYPE_MED)
1315                         overall_type = UPDATE_TYPE_MED;
1316         }
1317
1318         return overall_type;
1319 }
1320
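/*
 * dc_check_update_surfaces_for_stream() - classify a set of updates.
 *
 * Any stream-level update, missing stream status or surface count mismatch
 * forces UPDATE_TYPE_FULL.  Otherwise the result is the most expensive
 * classification among the individual surface updates (FAST < MED < FULL).
 */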
1321 enum surface_update_type dc_check_update_surfaces_for_stream(
1322                 struct dc *dc,
1323                 struct dc_surface_update *updates,
1324                 int surface_count,
1325                 struct dc_stream_update *stream_update,
1326                 const struct dc_stream_status *stream_status)
1327 {
1328         struct core_dc *core_dc = DC_TO_CORE(dc);
1329         int i;
1330         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1331
1332         if (stream_status == NULL || stream_status->surface_count != surface_count)
1333                 return UPDATE_TYPE_FULL;
1334
1335         if (stream_update)
1336                 return UPDATE_TYPE_FULL;
1337
1338         for (i = 0 ; i < surface_count; i++) {
1339                 enum surface_update_type type =
1340                                 det_surface_update(core_dc, &updates[i], i);
1341
1342                 if (type == UPDATE_TYPE_FULL)
1343                         return type;
1344
1345                 if (overall_type < type)
1346                         overall_type = type;
1347         }
1348
1349         return overall_type;
1350 }
1351
1352 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1353
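/*
 * dc_update_surfaces_and_stream() - apply surface and stream updates.
 *
 * The updates are classified first (see above).  A FULL update builds a new
 * validate_context by copy-constructing the current one and re-attaching
 * the new surface set, while FAST/MED updates operate on the current
 * context in place.  The update parameters are then written back into the
 * dc_surface objects and, for MED and above, the scaling parameters of the
 * affected pipes are rebuilt.
 */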
1354 void dc_update_surfaces_and_stream(struct dc *dc,
1355                 struct dc_surface_update *srf_updates, int surface_count,
1356                 const struct dc_stream *dc_stream,
1357                 struct dc_stream_update *stream_update)
1358 {
1359         struct core_dc *core_dc = DC_TO_CORE(dc);
1360         struct validate_context *context;
1361         int i, j;
1362         enum surface_update_type update_type;
1363         const struct dc_stream_status *stream_status;
1364         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1365         struct dc_context *dc_ctx = core_dc->ctx;
1366
1367         /* Currently this function does not result in any HW programming
1368          * when called with 0 surfaces, but proceeding would still update
1369          * SW state in validate_context.  So do nothing at all here until
1370          * the HW programming is implemented to properly handle the
1371          * 0-surface case.
1372          * TODO: fix HW programming, then remove this early return
1373          */
1374         if (surface_count == 0)
1375                 return;
1376
1377         stream_status = dc_stream_get_status(dc_stream);
1378         ASSERT(stream_status);
1379         if (!stream_status)
1380                 return; /* Cannot commit surface to stream that is not committed */
1381
1382 #ifdef ENABLE_FBC
1383         if (srf_updates->flip_addr) {
1384                 if (srf_updates->flip_addr->address.grph.addr.low_part == 0)
1385                         ASSERT(0);
1386         }
1387 #endif
1388         context = core_dc->current_context;
1389
1390         /* update current stream with the new updates */
1391         if (stream_update) {
1392                 if ((stream_update->src.height != 0) &&
1393                                 (stream_update->src.width != 0))
1394                         stream->public.src = stream_update->src;
1395
1396                 if ((stream_update->dst.height != 0) &&
1397                                 (stream_update->dst.width != 0))
1398                         stream->public.dst = stream_update->dst;
1399
1400                 if (stream_update->out_transfer_func &&
1401                                 stream_update->out_transfer_func !=
1402                                                 dc_stream->out_transfer_func) {
1403                         if (dc_stream->out_transfer_func != NULL)
1404                                 dc_transfer_func_release(dc_stream->out_transfer_func);
1405                         dc_transfer_func_retain(stream_update->out_transfer_func);
1406                         stream->public.out_transfer_func =
1407                                 stream_update->out_transfer_func;
1408                 }
1409         }
1410
1411         /* do not perform surface update if surface has invalid dimensions
1412          * (all zero) and no scaling_info is provided
1413          */
1414         if (surface_count > 0 &&
1415                         srf_updates->surface->src_rect.width == 0 &&
1416                         srf_updates->surface->src_rect.height == 0 &&
1417                         srf_updates->surface->dst_rect.width == 0 &&
1418                         srf_updates->surface->dst_rect.height == 0 &&
1419                         !srf_updates->scaling_info) {
1420                 ASSERT(false);
1421                 return;
1422         }
1423
1424         update_type = dc_check_update_surfaces_for_stream(
1425                         dc, srf_updates, surface_count, stream_update, stream_status);
1426
1427         if (update_type >= update_surface_trace_level)
1428                 update_surface_trace(dc, srf_updates, surface_count);
1429
1430         if (update_type >= UPDATE_TYPE_FULL) {
1431                 struct dc_surface *new_surfaces[MAX_SURFACES] = {0};
1432
1433                 for (i = 0; i < surface_count; i++)
1434                         new_surfaces[i] = srf_updates[i].surface;
1435
1436                 /* initialize scratch memory for building context */
1437                 context = dm_alloc(sizeof(*context));
1438                 if (context == NULL)
1439                                 goto context_alloc_fail;
1440
1441                 ++context->ref_count;
1442
1443                 dc_resource_validate_ctx_copy_construct(
1444                                 core_dc->current_context, context);
1445
1446                 /* add surface to context */
1447                 if (!resource_attach_surfaces_to_context(
1448                                 new_surfaces, surface_count, dc_stream,
1449                                 context, core_dc->res_pool)) {
1450                         BREAK_TO_DEBUGGER();
1451                         goto fail;
1452                 }
1453         }
1454
1455         /* save update parameters into surface */
1456         for (i = 0; i < surface_count; i++) {
1457                 struct dc_surface *surface = srf_updates[i].surface;
1458
1459                 if (srf_updates[i].flip_addr) {
1460                         surface->address = srf_updates[i].flip_addr->address;
1461                         surface->flip_immediate =
1462                                         srf_updates[i].flip_addr->flip_immediate;
1463                 }
1464
1465                 if (srf_updates[i].scaling_info) {
1466                         surface->scaling_quality =
1467                                         srf_updates[i].scaling_info->scaling_quality;
1468                         surface->dst_rect =
1469                                         srf_updates[i].scaling_info->dst_rect;
1470                         surface->src_rect =
1471                                         srf_updates[i].scaling_info->src_rect;
1472                         surface->clip_rect =
1473                                         srf_updates[i].scaling_info->clip_rect;
1474                 }
1475
1476                 if (srf_updates[i].plane_info) {
1477                         surface->color_space =
1478                                         srf_updates[i].plane_info->color_space;
1479                         surface->format =
1480                                         srf_updates[i].plane_info->format;
1481                         surface->plane_size =
1482                                         srf_updates[i].plane_info->plane_size;
1483                         surface->rotation =
1484                                         srf_updates[i].plane_info->rotation;
1485                         surface->horizontal_mirror =
1486                                         srf_updates[i].plane_info->horizontal_mirror;
1487                         surface->stereo_format =
1488                                         srf_updates[i].plane_info->stereo_format;
1489                         surface->tiling_info =
1490                                         srf_updates[i].plane_info->tiling_info;
1491                         surface->visible =
1492                                         srf_updates[i].plane_info->visible;
1493                         surface->per_pixel_alpha =
1494                                         srf_updates[i].plane_info->per_pixel_alpha;
1495                         surface->dcc =
1496                                         srf_updates[i].plane_info->dcc;
1497                 }
1498
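                     /*
                      * For medium and larger updates, rebuild the scaling
                      * parameters for every pipe that carries this surface.
                      */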
1499                 if (update_type >= UPDATE_TYPE_MED) {
1500                         for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1501                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1502
1503                                 if (pipe_ctx->surface != surface)
1504                                         continue;
1505
1506                                 resource_build_scaling_params(pipe_ctx);
1507                         }
1508                 }
1509
1510                 if (srf_updates[i].gamma &&
1511                         srf_updates[i].gamma != surface->gamma_correction) {
1512                         if (surface->gamma_correction != NULL)
1513                                 dc_gamma_release(&surface->gamma_correction);
1514
1515                         dc_gamma_retain(srf_updates[i].gamma);
1516                         surface->gamma_correction = srf_updates[i].gamma;
1517                 }
1518
1519                 if (srf_updates[i].in_transfer_func &&
1520                         srf_updates[i].in_transfer_func != surface->in_transfer_func) {
1521                         if (surface->in_transfer_func != NULL)
1522                                 dc_transfer_func_release(
1523                                                 surface->
1524                                                 in_transfer_func);
1525
1526                         dc_transfer_func_retain(
1527                                         srf_updates[i].in_transfer_func);
1528                         surface->in_transfer_func =
1529                                         srf_updates[i].in_transfer_func;
1530                 }
1531
1532                 if (srf_updates[i].hdr_static_metadata)
1533                         surface->hdr_static_ctx =
1534                                 *(srf_updates[i].hdr_static_metadata);
1535         }
1536
1537         if (update_type == UPDATE_TYPE_FULL) {
1538                 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1539                         BREAK_TO_DEBUGGER();
1540                         goto fail;
1541                 } else {
1542                         core_dc->hwss.set_bandwidth(core_dc, context, false);
1543                         context_clock_trace(dc, context);
1544                 }
1545         }
1546
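             /*
              * For anything beyond a fast update, wait for pending MPCC
              * disconnects to complete before reprogramming the pipes.
              */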
1547         if (update_type > UPDATE_TYPE_FAST) {
1548                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1549                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1550
1551                         core_dc->hwss.wait_for_mpcc_disconnect(core_dc, core_dc->res_pool, pipe_ctx);
1552                 }
1553         }
1554
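             /*
              * Zero surfaces: apply the context with a NULL surface. Currently
              * unreachable because of the early return at the top of this
              * function (see the TODO there).
              */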
1555         if (surface_count == 0)
1556                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1557
1558         /* Lock pipes for the provided surfaces, or all active pipes on a full update */
1559         for (i = 0; i < surface_count; i++) {
1560                 struct dc_surface *surface = srf_updates[i].surface;
1561
1562                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1563                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1564
1565                         if (update_type != UPDATE_TYPE_FULL && pipe_ctx->surface != surface)
1566                                 continue;
1567                         if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1568                                 continue;
1569
1570                         if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1571                                 core_dc->hwss.pipe_control_lock(
1572                                                 core_dc,
1573                                                 pipe_ctx,
1574                                                 true);
1575                         }
1576                 }
1577                 if (update_type == UPDATE_TYPE_FULL)
1578                         break;
1579         }
1580
1581         /* Full front-end (FE) update */
1582         for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1583                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1584                 struct pipe_ctx *cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1585                 bool is_new_pipe_surface = cur_pipe_ctx->surface != pipe_ctx->surface;
1586                 struct dc_cursor_position position = { 0 };
1587
1588                 if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->surface)
1589                         continue;
1590
1591                 if (!pipe_ctx->top_pipe)
1592                         core_dc->hwss.apply_ctx_for_surface(
1593                                         core_dc, pipe_ctx->surface, context);
1594
1595                 /* TODO: this is a workaround for switching from MPO to pipe split */
1596                 dc_stream_set_cursor_position(&pipe_ctx->stream->public, &position);
1597
1598                 if (is_new_pipe_surface) {
1599                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1600                         core_dc->hwss.set_input_transfer_func(
1601                                         pipe_ctx, pipe_ctx->surface);
1602                         core_dc->hwss.set_output_transfer_func(
1603                                         pipe_ctx, pipe_ctx->stream);
1604                 }
1605         }
1606
1607         if (update_type > UPDATE_TYPE_FAST)
1608                 context_timing_trace(dc, &context->res_ctx);
1609
1610         /* Perform requested Updates */
1611         for (i = 0; i < surface_count; i++) {
1612                 struct dc_surface *surface = srf_updates[i].surface;
1613
1614                 if (update_type == UPDATE_TYPE_MED)
1615                         core_dc->hwss.apply_ctx_for_surface(
1616                                         core_dc, surface, context);
1617
1618                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1619                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1620
1621                         if (pipe_ctx->surface != surface)
1622                                 continue;
1623
1624                         if (srf_updates[i].flip_addr)
1625                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1626
1627                         if (update_type == UPDATE_TYPE_FAST)
1628                                 continue;
1629
1630                         if (srf_updates[i].in_transfer_func)
1631                                 core_dc->hwss.set_input_transfer_func(
1632                                                 pipe_ctx, pipe_ctx->surface);
1633
1634                         if (stream_update != NULL &&
1635                                         stream_update->out_transfer_func != NULL) {
1636                                 core_dc->hwss.set_output_transfer_func(
1637                                                 pipe_ctx, pipe_ctx->stream);
1638                         }
1639
1640                         if (srf_updates[i].hdr_static_metadata) {
1641                                 resource_build_info_frame(pipe_ctx);
1642                                 core_dc->hwss.update_info_frame(pipe_ctx);
1643                         }
1644                 }
1645         }
1646
1647         /* Unlock pipes */
1648         for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
1649                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1650
1651                 for (j = 0; j < surface_count; j++) {
1652                         if (update_type != UPDATE_TYPE_FULL &&
1653                             srf_updates[j].surface != pipe_ctx->surface)
1654                                 continue;
1655                         if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1656                                 continue;
1657
1658                         if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1659                                 core_dc->hwss.pipe_control_lock(
1660                                                 core_dc,
1661                                                 pipe_ctx,
1662                                                 false);
1663                         }
1664                         break;
1665                 }
1666         }
1667
1668         if (core_dc->current_context != context) {
1669                 dc_release_validate_context(core_dc->current_context);
1670                 core_dc->current_context = context;
1671         }
1672         return;
1673
1674 fail:
1675         dc_release_validate_context(context);
1676
1677 context_alloc_fail:
1678         DC_ERROR("Failed to allocate new validate context!\n");
1679 }
1680
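     /* Number of streams in the currently committed context. */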
1681 uint8_t dc_get_current_stream_count(const struct dc *dc)
1682 {
1683         struct core_dc *core_dc = DC_TO_CORE(dc);
1684         return core_dc->current_context->stream_count;
1685 }
1686
1687 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1688 {
1689         struct core_dc *core_dc = DC_TO_CORE(dc);
1690         if (i < core_dc->current_context->stream_count)
1691                 return &(core_dc->current_context->streams[i]->public);
1692         return NULL;
1693 }
1694
1695 struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1696 {
1697         struct core_dc *core_dc = DC_TO_CORE(dc);
1698         return core_dc->links[link_index];
1699 }
1700
1701 const struct graphics_object_id dc_get_link_id_at_index(
1702         struct dc *dc, uint32_t link_index)
1703 {
1704         struct core_dc *core_dc = DC_TO_CORE(dc);
1705         return core_dc->links[link_index]->link_id;
1706 }
1707
1708 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1709         struct dc *dc, uint32_t link_index)
1710 {
1711         struct core_dc *core_dc = DC_TO_CORE(dc);
1712         return core_dc->links[link_index]->irq_source_hpd;
1713 }
1714
1715 const struct audio **dc_get_audios(struct dc *dc)
1716 {
1717         struct core_dc *core_dc = DC_TO_CORE(dc);
1718         return (const struct audio **)core_dc->res_pool->audios;
1719 }
1720
1721 enum dc_irq_source dc_interrupt_to_irq_source(
1722                 struct dc *dc,
1723                 uint32_t src_id,
1724                 uint32_t ext_id)
1725 {
1726         struct core_dc *core_dc = DC_TO_CORE(dc);
1727         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1728 }
1729
1730 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1731 {
1732         struct core_dc *core_dc;
1733
1734         if (dc == NULL)
1735                 return;
1736         core_dc = DC_TO_CORE(dc);
1737
1738         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1739 }
1740
1741 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1742 {
1743         struct core_dc *core_dc = DC_TO_CORE(dc);
1744         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1745 }
1746
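     /*
      * D0 re-initializes the hardware; any other power state powers the
      * hardware down and resets the current context (preserving its refcount)
      * so that resume starts from a clean state.
      */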
1747 void dc_set_power_state(
1748         struct dc *dc,
1749         enum dc_acpi_cm_power_state power_state)
1750 {
1751         struct core_dc *core_dc = DC_TO_CORE(dc);
1752         int ref_count;
1753
1754         switch (power_state) {
1755         case DC_ACPI_CM_POWER_STATE_D0:
1756                 core_dc->hwss.init_hw(core_dc);
1757                 break;
1758         default:
1759
1760                 core_dc->hwss.power_down(core_dc);
1761
1762                 /* Zero out the current context so that on resume we start with
1763                  * clean state, and dc hw programming optimizations will not
1764                  * cause any trouble.
1765                  */
1766
1767                 /* Preserve refcount */
1768                 ref_count = core_dc->current_context->ref_count;
1769                 dc_resource_validate_ctx_destruct(core_dc->current_context);
1770                 memset(core_dc->current_context, 0,
1771                                 sizeof(*core_dc->current_context));
1772                 core_dc->current_context->ref_count = ref_count;
1773
1774                 break;
1775         }
1776
1777 }
1778
1779 void dc_resume(const struct dc *dc)
1780 {
1781         struct core_dc *core_dc = DC_TO_CORE(dc);
1782
1783         uint32_t i;
1784
1785         for (i = 0; i < core_dc->link_count; i++)
1786                 core_link_resume(core_dc->links[i]);
1787 }
1788
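     /*
      * Read DPCD registers over the link's AUX channel. Returns true on
      * success.
      *
      * Illustrative (hypothetical) caller, reading DPCD_REV at address 0x000:
      *
      *      uint8_t rev;
      *
      *      if (dc_read_aux_dpcd(dc, link_index, 0x000, &rev, sizeof(rev)))
      *              ... use rev ...
      */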
1789 bool dc_read_aux_dpcd(
1790                 struct dc *dc,
1791                 uint32_t link_index,
1792                 uint32_t address,
1793                 uint8_t *data,
1794                 uint32_t size)
1795 {
1796         struct core_dc *core_dc = DC_TO_CORE(dc);
1797
1798         struct dc_link *link = core_dc->links[link_index];
1799         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1800                         link->ddc,
1801                         false,
1802                         I2C_MOT_UNDEF,
1803                         address,
1804                         data,
1805                         size);
1806         return r == DDC_RESULT_SUCESSFULL;
1807 }
1808
1809 bool dc_write_aux_dpcd(
1810                 struct dc *dc,
1811                 uint32_t link_index,
1812                 uint32_t address,
1813                 const uint8_t *data,
1814                 uint32_t size)
1815 {
1816         struct core_dc *core_dc = DC_TO_CORE(dc);
1817         struct dc_link *link = core_dc->links[link_index];
1818
1819         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1820                         link->ddc,
1821                         false,
1822                         I2C_MOT_UNDEF,
1823                         address,
1824                         data,
1825                         size);
1826         return r == DDC_RESULT_SUCESSFULL;
1827 }
1828
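     /* Read from an I2C-over-AUX device behind the given link. */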
1829 bool dc_read_aux_i2c(
1830                 struct dc *dc,
1831                 uint32_t link_index,
1832                 enum i2c_mot_mode mot,
1833                 uint32_t address,
1834                 uint8_t *data,
1835                 uint32_t size)
1836 {
1837         struct core_dc *core_dc = DC_TO_CORE(dc);
1838
1839         struct dc_link *link = core_dc->links[link_index];
1840         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1841                         link->ddc,
1842                         true,
1843                         mot,
1844                         address,
1845                         data,
1846                         size);
1847         return r == DDC_RESULT_SUCESSFULL;
1848 }
1849
1850 bool dc_write_aux_i2c(
1851                 struct dc *dc,
1852                 uint32_t link_index,
1853                 enum i2c_mot_mode mot,
1854                 uint32_t address,
1855                 const uint8_t *data,
1856                 uint32_t size)
1857 {
1858         struct core_dc *core_dc = DC_TO_CORE(dc);
1859         struct dc_link *link = core_dc->links[link_index];
1860
1861         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1862                         link->ddc,
1863                         true,
1864                         mot,
1865                         address,
1866                         data,
1867                         size);
1868         return r == DDC_RESULT_SUCESSFULL;
1869 }
1870
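     /* Combined DDC write-then-read transaction at 'address' on the given link. */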
1871 bool dc_query_ddc_data(
1872                 struct dc *dc,
1873                 uint32_t link_index,
1874                 uint32_t address,
1875                 uint8_t *write_buf,
1876                 uint32_t write_size,
1877                 uint8_t *read_buf,
1878                 uint32_t read_size)
1879 {
1880         struct core_dc *core_dc = DC_TO_CORE(dc);
1881
1882         struct dc_link *link = core_dc->links[link_index];
1883
1884         bool result = dal_ddc_service_query_ddc_data(
1885                         link->ddc,
1886                         address,
1887                         write_buf,
1888                         write_size,
1889                         read_buf,
1890                         read_size);
1891
1892         return result;
1893 }
1894
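     /* Submit a raw i2c_command on the DDC pin of the given link. */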
1895 bool dc_submit_i2c(
1896                 struct dc *dc,
1897                 uint32_t link_index,
1898                 struct i2c_command *cmd)
1899 {
1900         struct core_dc *core_dc = DC_TO_CORE(dc);
1901
1902         struct dc_link *link = core_dc->links[link_index];
1903         struct ddc_service *ddc = link->ddc;
1904
1905         return dal_i2caux_submit_i2c_command(
1906                 ddc->ctx->i2caux,
1907                 ddc->ddc_pin,
1908                 cmd);
1909 }
1910
1911 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
1912 {
1913         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1914                 BREAK_TO_DEBUGGER();
1915                 return false;
1916         }
1917
1918         dc_sink_retain(sink);
1919
1920         dc_link->remote_sinks[dc_link->sink_count] = sink;
1921         dc_link->sink_count++;
1922
1923         return true;
1924 }
1925
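     /*
      * Create a sink from the supplied EDID and register it as a remote sink
      * on the link. Returns the new sink, or NULL on failure.
      */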
1926 struct dc_sink *dc_link_add_remote_sink(
1927                 struct dc_link *link,
1928                 const uint8_t *edid,
1929                 int len,
1930                 struct dc_sink_init_data *init_data)
1931 {
1932         struct dc_sink *dc_sink;
1933         enum dc_edid_status edid_status;
1934
1935         if (len > MAX_EDID_BUFFER_SIZE) {
1936                 dm_error("Max EDID buffer size breached!\n");
1937                 return NULL;
1938         }
1939
1940         if (!init_data) {
1941                 BREAK_TO_DEBUGGER();
1942                 return NULL;
1943         }
1944
1945         if (!init_data->link) {
1946                 BREAK_TO_DEBUGGER();
1947                 return NULL;
1948         }
1949
1950         dc_sink = dc_sink_create(init_data);
1951
1952         if (!dc_sink)
1953                 return NULL;
1954
1955         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1956         dc_sink->dc_edid.length = len;
1957
1958         if (!link_add_remote_sink_helper(
1959                         link,
1960                         dc_sink))
1961                 goto fail_add_sink;
1962
1963         edid_status = dm_helpers_parse_edid_caps(
1964                         link->ctx,
1965                         &dc_sink->dc_edid,
1966                         &dc_sink->edid_caps);
1967
1968         if (edid_status != EDID_OK)
1969                 goto fail;
1970
1971         return dc_sink;
1972 fail:
1973         dc_link_remove_remote_sink(link, dc_sink);
1974 fail_add_sink:
1975         dc_sink_release(dc_sink);
1976         return NULL;
1977 }
1978
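     /* Attach (or, when sink is NULL, detach) the local sink of a link. */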
1979 void dc_link_set_sink(struct dc_link *link, struct dc_sink *sink)
1980 {
1981         link->local_sink = sink;
1982
1983         if (sink == NULL) {
1984                 link->type = dc_connection_none;
1985         } else {
1986                 link->type = dc_connection_single;
1987         }
1988 }
1989
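     /* Drop a remote sink from the link and compact the remote_sinks array. */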
1990 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
1991 {
1992         int i;
1993
1994         if (!link->sink_count) {
1995                 BREAK_TO_DEBUGGER();
1996                 return;
1997         }
1998
1999         for (i = 0; i < link->sink_count; i++) {
2000                 if (link->remote_sinks[i] == sink) {
2001                         dc_sink_release(sink);
2002                         link->remote_sinks[i] = NULL;
2003
2004                         /* compact the array to close the empty slot */
2005                         while (i < link->sink_count - 1) {
2006                                 link->remote_sinks[i] = link->remote_sinks[i+1];
2007                                 i++;
2008                         }
2009                         link->remote_sinks[i] = NULL;
2010                         link->sink_count--;
2011                         return;
2012                 }
2013         }
2014 }
2015
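     /*
      * Forward DCHUB configuration to the hw sequencer's update_dchub hook.
      * Requires at least one mem_input in the resource pool.
      */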
2016 bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
2017 {
2018         int i;
2019         struct core_dc *core_dc = DC_TO_CORE(dc);
2020         struct mem_input *mi = NULL;
2021
2022         for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
2023                 if (core_dc->res_pool->mis[i] != NULL) {
2024                         mi = core_dc->res_pool->mis[i];
2025                         break;
2026                 }
2027         }
2028         if (mi == NULL) {
2029                 dm_error("no mem_input!\n");
2030                 return false;
2031         }
2032
2033         if (core_dc->hwss.update_dchub)
2034                 core_dc->hwss.update_dchub(core_dc->hwseq, dh_data);
2035         else
2036                 ASSERT(core_dc->hwss.update_dchub);
2037
2038
2039         return true;
2040
2041 }
2042
2043 void dc_log_hw_state(struct dc *dc)
2044 {
2045         struct core_dc *core_dc = DC_TO_CORE(dc);
2046
2047         if (core_dc->hwss.log_hw_state)
2048                 core_dc->hwss.log_hw_state(core_dc);
2049 }