drm/amd/display: fix decide_link_settings
drivers/gpu/drm/amd/display/dc/core/dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32
33 #include "resource.h"
34
35 #include "clock_source.h"
36 #include "dc_bios_types.h"
37
38 #include "dce_calcs.h"
39 #include "bios_parser_interface.h"
40 #include "include/irq_service_interface.h"
41 #include "transform.h"
42 #include "timing_generator.h"
43 #include "virtual/virtual_link_encoder.h"
44
45 #include "link_hwss.h"
46 #include "link_encoder.h"
47
48 #include "dc_link_ddc.h"
49 #include "dm_helpers.h"
50 #include "mem_input.h"
51
52 /*******************************************************************************
53  * Private functions
54  ******************************************************************************/
55 static void destroy_links(struct core_dc *dc)
56 {
57         uint32_t i;
58
59         for (i = 0; i < dc->link_count; i++) {
60                 if (NULL != dc->links[i])
61                         link_destroy(&dc->links[i]);
62         }
63 }
64
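/*
 * Enumerate the physical connectors reported by the BIOS object table and
 * create a core_link for each one that initializes successfully, then append
 * num_virtual_links virtual links, each backed by a virtual link encoder.
 */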
65 static bool create_links(
66                 struct core_dc *dc,
67                 uint32_t num_virtual_links)
68 {
69         int i;
70         int connectors_num;
71         struct dc_bios *bios = dc->ctx->dc_bios;
72
73         dc->link_count = 0;
74
75         connectors_num = bios->funcs->get_connectors_number(bios);
76
77         if (connectors_num > ENUM_ID_COUNT) {
78                 dm_error(
79                         "DC: Number of connectors %d exceeds maximum of %d!\n",
80                         connectors_num,
81                         ENUM_ID_COUNT);
82                 return false;
83         }
84
85         if (connectors_num == 0 && num_virtual_links == 0) {
86                 dm_error("DC: Number of connectors is zero!\n");
87         }
88
89         dm_output_to_console(
90                 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
91                 __func__,
92                 connectors_num,
93                 num_virtual_links);
94
95         for (i = 0; i < connectors_num; i++) {
96                 struct link_init_data link_init_params = {0};
97                 struct core_link *link;
98
99                 link_init_params.ctx = dc->ctx;
100                 /* next BIOS object table connector */
101                 link_init_params.connector_index = i;
102                 link_init_params.link_index = dc->link_count;
103                 link_init_params.dc = dc;
104                 link = link_create(&link_init_params);
105
106                 if (link) {
107                         dc->links[dc->link_count] = link;
108                         link->dc = dc;
109                         ++dc->link_count;
110                 }
111         }
112
113         for (i = 0; i < num_virtual_links; i++) {
114                 struct core_link *link = dm_alloc(sizeof(*link));
115                 struct encoder_init_data enc_init = {0};
116
117                 if (link == NULL) {
118                         BREAK_TO_DEBUGGER();
119                         goto failed_alloc;
120                 }
121
122                 link->ctx = dc->ctx;
123                 link->dc = dc;
124                 link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
125                 link->link_id.type = OBJECT_TYPE_CONNECTOR;
126                 link->link_id.id = CONNECTOR_ID_VIRTUAL;
127                 link->link_id.enum_id = ENUM_ID_1;
128                 link->link_enc = dm_alloc(sizeof(*link->link_enc));
129
130                 enc_init.ctx = dc->ctx;
131                 enc_init.channel = CHANNEL_ID_UNKNOWN;
132                 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
133                 enc_init.transmitter = TRANSMITTER_UNKNOWN;
134                 enc_init.connector = link->link_id;
135                 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
136                 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
137                 enc_init.encoder.enum_id = ENUM_ID_1;
138                 virtual_link_encoder_construct(link->link_enc, &enc_init);
139
140                 link->public.link_index = dc->link_count;
141                 dc->links[dc->link_count] = link;
142                 dc->link_count++;
143         }
144
145         return true;
146
147 failed_alloc:
148         return false;
149 }
150
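/*
 * Apply a new vmin/vmax (DRR) range to every pipe that drives stream[0] and
 * rebuild/update its info frame. Returns true if at least one matching pipe
 * was programmed.
 */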
151 static bool stream_adjust_vmin_vmax(struct dc *dc,
152                 const struct dc_stream **stream, int num_streams,
153                 int vmin, int vmax)
154 {
155         /* TODO: Support multiple streams */
156         struct core_dc *core_dc = DC_TO_CORE(dc);
157         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
158         int i = 0;
159         bool ret = false;
160
161         for (i = 0; i < MAX_PIPES; i++) {
162                 struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
163
164                 if (pipe->stream == core_stream && pipe->stream_enc) {
165                         core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
166
167                         /* build and update the info frame */
168                         resource_build_info_frame(pipe);
169                         core_dc->hwss.update_info_frame(pipe);
170
171                         ret = true;
172                 }
173         }
174         return ret;
175 }
176
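/*
 * Read the current and nominal vertical CRTC position for the pipe driving
 * stream[0]. Returns false if no matching active pipe is found.
 */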
177 static bool stream_get_crtc_position(struct dc *dc,
178                 const struct dc_stream **stream, int num_streams,
179                 unsigned int *v_pos, unsigned int *nom_v_pos)
180 {
181         /* TODO: Support multiple streams */
182         struct core_dc *core_dc = DC_TO_CORE(dc);
183         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[0]);
184         int i = 0;
185         bool ret = false;
186         struct crtc_position position;
187
188         for (i = 0; i < MAX_PIPES; i++) {
189                 struct pipe_ctx *pipe =
190                                 &core_dc->current_context->res_ctx.pipe_ctx[i];
191
192                 if (pipe->stream == core_stream && pipe->stream_enc) {
193                         core_dc->hwss.get_position(&pipe, 1, &position);
194
195                         *v_pos = position.vertical_count;
196                         *nom_v_pos = position.nominal_vcount;
197                         ret = true;
198                 }
199         }
200         return ret;
201 }
202
203 static bool set_gamut_remap(struct dc *dc, const struct dc_stream *stream)
204 {
205         struct core_dc *core_dc = DC_TO_CORE(dc);
206         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
207         int i = 0;
208         bool ret = false;
209         struct pipe_ctx *pipes;
210
211         for (i = 0; i < MAX_PIPES; i++) {
212                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
213                                 == core_stream) {
214
215                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
216                         core_dc->hwss.program_gamut_remap(pipes);
217                         ret = true;
218                 }
219         }
220
221         return ret;
222 }
223
224 static bool program_csc_matrix(struct dc *dc, const struct dc_stream *stream)
225 {
226         struct core_dc *core_dc = DC_TO_CORE(dc);
227         struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
228         int i = 0;
229         bool ret = false;
230         struct pipe_ctx *pipes;
231
232         for (i = 0; i < MAX_PIPES; i++) {
233                 if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
234                                 == core_stream) {
235
236                         pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
237                         core_dc->hwss.program_csc_matrix(pipes,
238                         core_stream->public.output_color_space,
239                         core_stream->public.csc_color_matrix.matrix);
240                         ret = true;
241                 }
242         }
243
244         return ret;
245 }
246
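/*
 * Collect every pipe that belongs to one of the given streams and hand the
 * whole set to the hardware sequencer's static screen control in one call.
 */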
247 static void set_static_screen_events(struct dc *dc,
248                 const struct dc_stream **stream,
249                 int num_streams,
250                 const struct dc_static_screen_events *events)
251 {
252         struct core_dc *core_dc = DC_TO_CORE(dc);
253         int i = 0;
254         int j = 0;
255         struct pipe_ctx *pipes_affected[MAX_PIPES];
256         int num_pipes_affected = 0;
257
258         for (i = 0; i < num_streams; i++) {
259                 struct core_stream *core_stream = DC_STREAM_TO_CORE(stream[i]);
260
261                 for (j = 0; j < MAX_PIPES; j++) {
262                         if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
263                                         == core_stream) {
264                                 pipes_affected[num_pipes_affected++] =
265                                                 &core_dc->current_context->res_ctx.pipe_ctx[j];
266                         }
267                 }
268         }
269
270         core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
271 }
272
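/*
 * Find the core link that backs the given public dc_link and apply the
 * requested DP drive settings to it.
 */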
273 static void set_drive_settings(struct dc *dc,
274                 struct link_training_settings *lt_settings,
275                 const struct dc_link *link)
276 {
277         struct core_dc *core_dc = DC_TO_CORE(dc);
278         int i;
279
280         for (i = 0; i < core_dc->link_count; i++) {
281                 if (&core_dc->links[i]->public == link)
282                         break;
283         }
284
285         if (i >= core_dc->link_count) {
286                 ASSERT_CRITICAL(false);
287                 return;
288         }
289         dc_link_dp_set_drive_settings(&core_dc->links[i]->public, lt_settings);
289 }
290
291 static void perform_link_training(struct dc *dc,
292                 struct dc_link_settings *link_setting,
293                 bool skip_video_pattern)
294 {
295         struct core_dc *core_dc = DC_TO_CORE(dc);
296         int i;
297
298         for (i = 0; i < core_dc->link_count; i++)
299                 dc_link_dp_perform_link_training(
300                         &core_dc->links[i]->public,
301                         link_setting,
302                         skip_video_pattern);
303 }
304
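/*
 * Record the preferred link settings on the link and immediately retrain it
 * with them.
 */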
305 static void set_preferred_link_settings(struct dc *dc,
306                 struct dc_link_settings *link_setting,
307                 const struct dc_link *link)
308 {
309         struct core_link *core_link = DC_LINK_TO_CORE(link);
310
311         core_link->public.preferred_link_setting =
312                                 *link_setting;
313         dp_retrain_link_dp_test(core_link, link_setting, false);
314 }
315
316 static void enable_hpd(const struct dc_link *link)
317 {
318         dc_link_dp_enable_hpd(link);
319 }
320
321 static void disable_hpd(const struct dc_link *link)
322 {
323         dc_link_dp_disable_hpd(link);
324 }
325
326
327 static void set_test_pattern(
328                 const struct dc_link *link,
329                 enum dp_test_pattern test_pattern,
330                 const struct link_training_settings *p_link_settings,
331                 const unsigned char *p_custom_pattern,
332                 unsigned int cust_pattern_size)
333 {
334         if (link != NULL)
335                 dc_link_dp_set_test_pattern(
336                         link,
337                         test_pattern,
338                         p_link_settings,
339                         p_custom_pattern,
340                         cust_pattern_size);
341 }
342
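/*
 * Select a dithering option for the stream. DITHER_OPTION_DEFAULT maps to a
 * spatial dither matching the stream's color depth. The resulting bit depth
 * reduction parameters are stored on the stream and programmed into the OPP
 * of the first pipe of the current context.
 */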
343 void set_dither_option(const struct dc_stream *dc_stream,
344                 enum dc_dither_option option)
345 {
346         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
347         struct bit_depth_reduction_params params;
348         struct core_link *core_link;
349         struct pipe_ctx *pipes;
350
351         if (!stream || option > DITHER_OPTION_MAX)
352                 return;
353
354         core_link = DC_LINK_TO_CORE(stream->status.link);
355         pipes = core_link->dc->current_context->res_ctx.pipe_ctx;
356         memset(&params, 0, sizeof(params));
357         if (option == DITHER_OPTION_DEFAULT) {
358                 switch (stream->public.timing.display_color_depth) {
359                 case COLOR_DEPTH_666:
360                         stream->public.dither_option = DITHER_OPTION_SPATIAL6;
361                         break;
362                 case COLOR_DEPTH_888:
363                         stream->public.dither_option = DITHER_OPTION_SPATIAL8;
364                         break;
365                 case COLOR_DEPTH_101010:
366                         stream->public.dither_option = DITHER_OPTION_SPATIAL10;
367                         break;
368                 default:
369                         option = DITHER_OPTION_DISABLE;
370                 }
371         } else {
372                 stream->public.dither_option = option;
373         }
374         resource_build_bit_depth_reduction_params(stream,
375                                 &params);
376         stream->bit_depth_params = params;
377         pipes->opp->funcs->
378                 opp_program_bit_depth_reduction(pipes->opp, &params);
379 }
380
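/*
 * Hook the static helpers above into the public stream and link function
 * tables; adjust_vmin_vmax is only exposed when the hardware sequencer
 * implements set_drr.
 */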
381 static void allocate_dc_stream_funcs(struct core_dc *core_dc)
382 {
383         if (core_dc->hwss.set_drr != NULL) {
384                 core_dc->public.stream_funcs.adjust_vmin_vmax =
385                                 stream_adjust_vmin_vmax;
386         }
387
388         core_dc->public.stream_funcs.set_static_screen_events =
389                         set_static_screen_events;
390
391         core_dc->public.stream_funcs.get_crtc_position =
392                         stream_get_crtc_position;
393
394         core_dc->public.stream_funcs.set_gamut_remap =
395                         set_gamut_remap;
396
397         core_dc->public.stream_funcs.program_csc_matrix =
398                         program_csc_matrix;
399
400         core_dc->public.stream_funcs.set_dither_option =
401                         set_dither_option;
402
403         core_dc->public.link_funcs.set_drive_settings =
404                         set_drive_settings;
405
406         core_dc->public.link_funcs.perform_link_training =
407                         perform_link_training;
408
409         core_dc->public.link_funcs.set_preferred_link_settings =
410                         set_preferred_link_settings;
411
412         core_dc->public.link_funcs.enable_hpd =
413                         enable_hpd;
414
415         core_dc->public.link_funcs.disable_hpd =
416                         disable_hpd;
417
418         core_dc->public.link_funcs.set_test_pattern =
419                         set_test_pattern;
420 }
421
422 static void destruct(struct core_dc *dc)
423 {
424         dc_release_validate_context(dc->current_context);
425         dc->current_context = NULL;
426
427         destroy_links(dc);
428
429         dc_destroy_resource_pool(dc);
430
431         if (dc->ctx->gpio_service)
432                 dal_gpio_service_destroy(&dc->ctx->gpio_service);
433
434         if (dc->ctx->i2caux)
435                 dal_i2caux_destroy(&dc->ctx->i2caux);
436
437         if (dc->ctx->created_bios)
438                 dal_bios_parser_destroy(&dc->ctx->dc_bios);
439
440         if (dc->ctx->logger)
441                 dal_logger_destroy(&dc->ctx->logger);
442
443         dm_free(dc->ctx);
444         dc->ctx = NULL;
445 }
446
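/*
 * Build the core_dc: context, logger, BIOS parser (unless a vbios override is
 * supplied), I2C AUX, GPIO service, resource pool and links. Any failure
 * unwinds through destruct().
 */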
447 static bool construct(struct core_dc *dc,
448                 const struct dc_init_data *init_params)
449 {
450         struct dal_logger *logger;
451         struct dc_context *dc_ctx = dm_alloc(sizeof(*dc_ctx));
452         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
453
454         if (!dc_ctx) {
455                 dm_error("%s: failed to create ctx\n", __func__);
456                 goto ctx_fail;
457         }
458
459         dc->current_context = dm_alloc(sizeof(*dc->current_context));
460
461         if (!dc->current_context) {
462                 dm_error("%s: failed to create validate ctx\n", __func__);
463                 goto val_ctx_fail;
464         }
465
466         dc->current_context->ref_count++;
467
468         dc_ctx->cgs_device = init_params->cgs_device;
469         dc_ctx->driver_context = init_params->driver;
470         dc_ctx->dc = &dc->public;
471         dc_ctx->asic_id = init_params->asic_id;
472
473         /* Create logger */
474         logger = dal_logger_create(dc_ctx);
475
476         if (!logger) {
477                 /* can *not* call logger. call base driver 'print error' */
478                 dm_error("%s: failed to create Logger!\n", __func__);
479                 goto logger_fail;
480         }
481         dc_ctx->logger = logger;
482         dc->ctx = dc_ctx;
483         dc->ctx->dce_environment = init_params->dce_environment;
484
485         dc_version = resource_parse_asic_id(init_params->asic_id);
486         dc->ctx->dce_version = dc_version;
487
488         /* Resource should construct all asic specific resources.
489          * This should be the only place where we need to parse the asic id
490          */
491         if (init_params->vbios_override)
492                 dc_ctx->dc_bios = init_params->vbios_override;
493         else {
494                 /* Create BIOS parser */
495                 struct bp_init_data bp_init_data;
496
497                 bp_init_data.ctx = dc_ctx;
498                 bp_init_data.bios = init_params->asic_id.atombios_base_address;
499
500                 dc_ctx->dc_bios = dal_bios_parser_create(
501                                 &bp_init_data, dc_version);
502
503                 if (!dc_ctx->dc_bios) {
504                         ASSERT_CRITICAL(false);
505                         goto bios_fail;
506                 }
507
508                 dc_ctx->created_bios = true;
509         }
510
511         /* Create I2C AUX */
512         dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
513
514         if (!dc_ctx->i2caux) {
515                 ASSERT_CRITICAL(false);
516                 goto failed_to_create_i2caux;
517         }
518
519         /* Create GPIO service */
520         dc_ctx->gpio_service = dal_gpio_service_create(
521                         dc_version,
522                         dc_ctx->dce_environment,
523                         dc_ctx);
524
525         if (!dc_ctx->gpio_service) {
526                 ASSERT_CRITICAL(false);
527                 goto gpio_fail;
528         }
529
530         dc->res_pool = dc_create_resource_pool(
531                         dc,
532                         init_params->num_virtual_links,
533                         dc_version,
534                         init_params->asic_id);
535         if (!dc->res_pool)
536                 goto create_resource_fail;
537
538         if (!create_links(dc, init_params->num_virtual_links))
539                 goto create_links_fail;
540
541         allocate_dc_stream_funcs(dc);
542
543         return true;
544
545         /**** error handling here ****/
546 create_links_fail:
547 create_resource_fail:
548 gpio_fail:
549 failed_to_create_i2caux:
550 bios_fail:
551 logger_fail:
552 val_ctx_fail:
553 ctx_fail:
554         destruct(dc);
555         return false;
556 }
557
558 /*
559 void ProgramPixelDurationV(unsigned int pixelClockInKHz )
560 {
561         fixed31_32 pixel_duration = Fixed31_32(100000000, pixelClockInKHz) * 10;
562         unsigned int pixDurationInPico = round(pixel_duration);
563
564         DPG_PIPE_ARBITRATION_CONTROL1 arb_control;
565
566         arb_control.u32All = ReadReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1);
567         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
568         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
569
570         arb_control.u32All = ReadReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1);
571         arb_control.bits.PIXEL_DURATION = pixDurationInPico;
572         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL1, arb_control.u32All);
573
574         WriteReg (mmDPGV0_PIPE_ARBITRATION_CONTROL2, 0x4000800);
575         WriteReg (mmDPGV0_REPEATER_PROGRAM, 0x11);
576
577         WriteReg (mmDPGV1_PIPE_ARBITRATION_CONTROL2, 0x4000800);
578         WriteReg (mmDPGV1_REPEATER_PROGRAM, 0x11);
579 }
580 */
581
582 /*******************************************************************************
583  * Public functions
584  ******************************************************************************/
585
586 struct dc *dc_create(const struct dc_init_data *init_params)
587 {
588         struct core_dc *core_dc = dm_alloc(sizeof(*core_dc));
589         unsigned int full_pipe_count;
590
591         if (NULL == core_dc)
592                 goto alloc_fail;
593
594         if (false == construct(core_dc, init_params))
595                 goto construct_fail;
596
597         /*TODO: separate HW and SW initialization*/
598         core_dc->hwss.init_hw(core_dc);
599
600         full_pipe_count = core_dc->res_pool->pipe_count;
601         if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
602                 full_pipe_count--;
603         core_dc->public.caps.max_streams = min(
604                         full_pipe_count,
605                         core_dc->res_pool->stream_enc_count);
606
607         core_dc->public.caps.max_links = core_dc->link_count;
608         core_dc->public.caps.max_audios = core_dc->res_pool->audio_count;
609
610         core_dc->public.config = init_params->flags;
611
612         dm_logger_write(core_dc->ctx->logger, LOG_DC,
613                         "Display Core initialized\n");
614
615
616         /* TODO: missing feature to be enabled */
617         core_dc->public.debug.disable_dfs_bypass = true;
618
619         return &core_dc->public;
620
621 construct_fail:
622         dm_free(core_dc);
623
624 alloc_fail:
625         return NULL;
626 }
627
628 void dc_destroy(struct dc **dc)
629 {
630         struct core_dc *core_dc = DC_TO_CORE(*dc);
631         destruct(core_dc);
632         dm_free(core_dc);
633         *dc = NULL;
634 }
635
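/*
 * Returns true if the requested validation set differs from the current
 * context: different stream/surface counts, a changed stream, or a surface
 * that differs in anything other than its clip rect and destination offset.
 */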
636 static bool is_validation_required(
637                 const struct core_dc *dc,
638                 const struct dc_validation_set set[],
639                 int set_count)
640 {
641         const struct validate_context *context = dc->current_context;
642         int i, j;
643
644         if (context->stream_count != set_count)
645                 return true;
646
647         for (i = 0; i < set_count; i++) {
648
649                 if (set[i].surface_count != context->stream_status[i].surface_count)
650                         return true;
651                 if (!is_stream_unchanged(DC_STREAM_TO_CORE(set[i].stream), context->streams[i]))
652                         return true;
653
654                 for (j = 0; j < set[i].surface_count; j++) {
655                         struct dc_surface temp_surf;
656                         memset(&temp_surf, 0, sizeof(temp_surf));
657
658                         temp_surf = *context->stream_status[i].surfaces[j];
659                         temp_surf.clip_rect = set[i].surfaces[j]->clip_rect;
660                         temp_surf.dst_rect.x = set[i].surfaces[j]->dst_rect.x;
661                         temp_surf.dst_rect.y = set[i].surfaces[j]->dst_rect.y;
662
663                         if (memcmp(&temp_surf, set[i].surfaces[j], sizeof(temp_surf)) != 0)
664                                 return true;
665                 }
666         }
667
668         return false;
669 }
670
671 struct validate_context *dc_get_validate_context(
672                 const struct dc *dc,
673                 const struct dc_validation_set set[],
674                 uint8_t set_count)
675 {
676         struct core_dc *core_dc = DC_TO_CORE(dc);
677         enum dc_status result = DC_ERROR_UNEXPECTED;
678         struct validate_context *context;
679
680         context = dm_alloc(sizeof(struct validate_context));
681         if (context == NULL)
682                 goto context_alloc_fail;
683
684         ++context->ref_count;
685
686         if (!is_validation_required(core_dc, set, set_count)) {
687                 dc_resource_validate_ctx_copy_construct(core_dc->current_context, context);
688                 return context;
689         }
690
691         result = core_dc->res_pool->funcs->validate_with_context(
692                         core_dc, set, set_count, context, core_dc->current_context);
693
694 context_alloc_fail:
695         if (result != DC_OK) {
696                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
697                                 "%s:resource validation failed, dc_status:%d\n",
698                                 __func__,
699                                 result);
700
701                 if (context)
702                         dc_release_validate_context(context);
703                 context = NULL;
703         }
704
705         return context;
706
707 }
708
709 bool dc_validate_resources(
710                 const struct dc *dc,
711                 const struct dc_validation_set set[],
712                 uint8_t set_count)
713 {
714         struct core_dc *core_dc = DC_TO_CORE(dc);
715         enum dc_status result = DC_ERROR_UNEXPECTED;
716         struct validate_context *context;
717
718         context = dm_alloc(sizeof(struct validate_context));
719         if (context == NULL)
720                 goto context_alloc_fail;
721
722         ++context->ref_count;
723
724         result = core_dc->res_pool->funcs->validate_with_context(
725                                 core_dc, set, set_count, context, NULL);
726
727 context_alloc_fail:
728         if (result != DC_OK) {
729                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
730                                 "%s:resource validation failed, dc_status:%d\n",
731                                 __func__,
732                                 result);
733         }
734
735         if (context)
736                 dc_release_validate_context(context);
737         context = NULL;
737
738         return result == DC_OK;
739 }
740
741 bool dc_validate_guaranteed(
742                 const struct dc *dc,
743                 const struct dc_stream *stream)
744 {
745         struct core_dc *core_dc = DC_TO_CORE(dc);
746         enum dc_status result = DC_ERROR_UNEXPECTED;
747         struct validate_context *context;
748
749         context = dm_alloc(sizeof(struct validate_context));
750         if (context == NULL)
751                 goto context_alloc_fail;
752
753         ++context->ref_count;
754
755         result = core_dc->res_pool->funcs->validate_guaranteed(
756                                         core_dc, stream, context);
757
758         dc_release_validate_context(context);
759
760 context_alloc_fail:
761         if (result != DC_OK) {
762                 dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
763                         "%s:guaranteed validation failed, dc_status:%d\n",
764                         __func__,
765                         result);
766         }
767
768         return (result == DC_OK);
769 }
770
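/*
 * Group top-level pipes whose stream timings can be synchronized, promote an
 * already-unblanked pipe to group master, drop other unblanked pipes (already
 * in sync), and enable timing synchronization for every group with more than
 * one member.
 */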
771 static void program_timing_sync(
772                 struct core_dc *core_dc,
773                 struct validate_context *ctx)
774 {
775         int i, j;
776         int group_index = 0;
777         int pipe_count = core_dc->res_pool->pipe_count;
778         struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
779
780         for (i = 0; i < pipe_count; i++) {
781                 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
782                         continue;
783
784                 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
785         }
786
787         for (i = 0; i < pipe_count; i++) {
788                 int group_size = 1;
789                 struct pipe_ctx *pipe_set[MAX_PIPES];
790
791                 if (!unsynced_pipes[i])
792                         continue;
793
794                 pipe_set[0] = unsynced_pipes[i];
795                 unsynced_pipes[i] = NULL;
796
797                 /* Add tg to the set, search rest of the tg's for ones with
798                  * same timing, add all tgs with same timing to the group
799                  */
800                 for (j = i + 1; j < pipe_count; j++) {
801                         if (!unsynced_pipes[j])
802                                 continue;
803
804                         if (resource_are_streams_timing_synchronizable(
805                                         unsynced_pipes[j]->stream,
806                                         pipe_set[0]->stream)) {
807                                 pipe_set[group_size] = unsynced_pipes[j];
808                                 unsynced_pipes[j] = NULL;
809                                 group_size++;
810                         }
811                 }
812
813                 /* set first unblanked pipe as master */
814                 for (j = 0; j < group_size; j++) {
815                         struct pipe_ctx *temp;
816
817                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
818                                 if (j == 0)
819                                         break;
820
821                                 temp = pipe_set[0];
822                                 pipe_set[0] = pipe_set[j];
823                                 pipe_set[j] = temp;
824                                 break;
825                         }
826                 }
827
828                 /* remove any other unblanked pipes as they have already been synced */
829                 for (j = j + 1; j < group_size; j++) {
830                         if (!pipe_set[j]->tg->funcs->is_blanked(pipe_set[j]->tg)) {
831                                 group_size--;
832                                 pipe_set[j] = pipe_set[group_size];
833                                 j--;
834                         }
835                 }
836
837                 if (group_size > 1) {
838                         core_dc->hwss.enable_timing_synchronization(
839                                 core_dc, group_index, group_size, pipe_set);
840                         group_index++;
841                 }
842         }
843 }
844
845 static bool context_changed(
846                 struct core_dc *dc,
847                 struct validate_context *context)
848 {
849         uint8_t i;
850
851         if (context->stream_count != dc->current_context->stream_count)
852                 return true;
853
854         for (i = 0; i < dc->current_context->stream_count; i++) {
855                 if (&dc->current_context->streams[i]->public != &context->streams[i]->public)
856                         return true;
857         }
858
859         return false;
860 }
861
862 static bool streams_changed(
863                 struct core_dc *dc,
864                 const struct dc_stream *streams[],
865                 uint8_t stream_count)
866 {
867         uint8_t i;
868
869         if (stream_count != dc->current_context->stream_count)
870                 return true;
871
872         for (i = 0; i < dc->current_context->stream_count; i++) {
873                 if (&dc->current_context->streams[i]->public != streams[i])
874                         return true;
875         }
876
877         return false;
878 }
879
880 bool dc_enable_stereo(
881         struct dc *dc,
882         struct validate_context *context,
883         const struct dc_stream *streams[],
884         uint8_t stream_count)
885 {
886         bool ret = true;
887         int i, j;
888         struct pipe_ctx *pipe;
889         struct core_dc *core_dc = DC_TO_CORE(dc);
890
891 #ifdef ENABLE_FBC
892         struct compressor *fbc_compressor = core_dc->fbc_compressor;
893 #endif
894
895         for (i = 0; i < MAX_PIPES; i++) {
896                 if (context != NULL)
897                         pipe = &context->res_ctx.pipe_ctx[i];
898                 else
899                         pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
900                 for (j = 0 ; pipe && j < stream_count; j++)  {
901                         if (streams[j] && streams[j] == &pipe->stream->public &&
902                                 core_dc->hwss.setup_stereo)
903                                 core_dc->hwss.setup_stereo(pipe, core_dc);
904                 }
905         }
906
907 #ifdef ENABLE_FBC
908         if (fbc_compressor != NULL &&
909             fbc_compressor->funcs->is_fbc_enabled_in_hw(core_dc->fbc_compressor,
910                                                         &pipe->tg->inst))
911                 fbc_compressor->funcs->disable_fbc(fbc_compressor);
912
913 #endif
914         return ret;
915 }
916
917
918 /*
919  * Applies the given context to HW and copies it into the current context.
920  * It's up to the user to release the src context afterwards.
921  */
922 static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *context)
923 {
924         struct core_dc *core_dc = DC_TO_CORE(dc);
925         struct dc_bios *dcb = core_dc->ctx->dc_bios;
926         enum dc_status result = DC_ERROR_UNEXPECTED;
927         struct pipe_ctx *pipe;
928         int i, j, k, l;
929         const struct dc_stream *dc_streams[MAX_STREAMS] = {0};
930
931         for (i = 0; i < context->stream_count; i++)
932                 dc_streams[i] =  &context->streams[i]->public;
933
934         if (!dcb->funcs->is_accelerated_mode(dcb))
935                 core_dc->hwss.enable_accelerated_mode(core_dc);
936
937         result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
938
939         program_timing_sync(core_dc, context);
940
941         for (i = 0; i < context->stream_count; i++) {
942                 const struct core_sink *sink = context->streams[i]->sink;
943
944                 for (j = 0; j < context->stream_status[i].surface_count; j++) {
945                         struct core_surface *surface =
946                                         DC_SURFACE_TO_CORE(context->stream_status[i].surfaces[j]);
947
948                         core_dc->hwss.apply_ctx_for_surface(core_dc, surface, context);
949
950                         /*
951                          * enable stereo
952                          * TODO rework dc_enable_stereo call to work with validation sets?
953                          */
954                         for (k = 0; k < MAX_PIPES; k++) {
955                                 pipe = &context->res_ctx.pipe_ctx[k];
956
957                                 for (l = 0 ; pipe && l < context->stream_count; l++)  {
958                                         if (context->streams[l] &&
959                                             context->streams[l] == pipe->stream &&
960                                             core_dc->hwss.setup_stereo)
961                                                 core_dc->hwss.setup_stereo(pipe, core_dc);
962                                 }
963                         }
964                 }
965
966                 CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
967                                 context->streams[i]->public.timing.h_addressable,
968                                 context->streams[i]->public.timing.v_addressable,
969                                 context->streams[i]->public.timing.h_total,
970                                 context->streams[i]->public.timing.v_total,
971                                 context->streams[i]->public.timing.pix_clk_khz);
972         }
973
974         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
975
976         dc_release_validate_context(core_dc->current_context);
977
978         core_dc->current_context = context;
979
980         dc_retain_validate_context(core_dc->current_context);
981
982         return (result == DC_OK);
983 }
984
985 bool dc_commit_context(struct dc *dc, struct validate_context *context)
986 {
987         enum dc_status result = DC_ERROR_UNEXPECTED;
988         struct core_dc *core_dc = DC_TO_CORE(dc);
989         int i;
990
991         if (false == context_changed(core_dc, context))
992                 return true;
993
994         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
995                                 __func__, context->stream_count);
996
997         for (i = 0; i < context->stream_count; i++) {
998                 const struct dc_stream *stream = &context->streams[i]->public;
999
1000                 dc_stream_log(stream,
1001                                 core_dc->ctx->logger,
1002                                 LOG_DC);
1003         }
1004
1005         result = dc_commit_context_no_check(dc, context);
1006
1007         return (result == DC_OK);
1008 }
1009
1010
1011 bool dc_commit_streams(
1012         struct dc *dc,
1013         const struct dc_stream *streams[],
1014         uint8_t stream_count)
1015 {
1016         struct core_dc *core_dc = DC_TO_CORE(dc);
1017         enum dc_status result = DC_ERROR_UNEXPECTED;
1018         struct validate_context *context;
1019         struct dc_validation_set set[MAX_STREAMS] = { {0, {0} } };
1020         int i;
1021
1022         if (false == streams_changed(core_dc, streams, stream_count))
1023                 return true;
1024
1025         dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
1026                                 __func__, stream_count);
1027
1028         for (i = 0; i < stream_count; i++) {
1029                 const struct dc_stream *stream = streams[i];
1030                 const struct dc_stream_status *status = dc_stream_get_status(stream);
1031                 int j;
1032
1033                 dc_stream_log(stream,
1034                                 core_dc->ctx->logger,
1035                                 LOG_DC);
1036
1037                 set[i].stream = stream;
1038
1039                 if (status) {
1040                         set[i].surface_count = status->surface_count;
1041                         for (j = 0; j < status->surface_count; j++)
1042                                 set[i].surfaces[j] = status->surfaces[j];
1043                 }
1044
1045         }
1046
1047         context = dm_alloc(sizeof(struct validate_context));
1048         if (context == NULL)
1049                 goto context_alloc_fail;
1050
1051         ++context->ref_count;
1052
1053         result = core_dc->res_pool->funcs->validate_with_context(
1054                         core_dc, set, stream_count, context, core_dc->current_context);
1055         if (result != DC_OK) {
1056                 dm_logger_write(core_dc->ctx->logger, LOG_ERROR,
1057                                         "%s: Context validation failed! dc_status:%d\n",
1058                                         __func__,
1059                                         result);
1060                 BREAK_TO_DEBUGGER();
1061                 goto fail;
1062         }
1063
1064         result = dc_commit_context_no_check(dc, context);
1065
1066 fail:
1067         dc_release_validate_context(context);
1068
1069 context_alloc_fail:
1070         return (result == DC_OK);
1071 }
1072
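/*
 * After surface updates: power down any front end whose pipe no longer has
 * both a stream and a surface, then reprogram bandwidth for the current
 * context (see the RV workaround note below).
 */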
1073 bool dc_post_update_surfaces_to_stream(struct dc *dc)
1074 {
1075         int i;
1076         struct core_dc *core_dc = DC_TO_CORE(dc);
1077         struct validate_context *context = core_dc->current_context;
1078
1079         post_surface_trace(dc);
1080
1081         for (i = 0; i < core_dc->res_pool->pipe_count; i++)
1082                 if (context->res_ctx.pipe_ctx[i].stream == NULL
1083                                 || context->res_ctx.pipe_ctx[i].surface == NULL)
1084                         core_dc->hwss.power_down_front_end(core_dc, i);
1085
1086         /* 3rd param should be true, temp w/a for RV*/
1087 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1088         core_dc->hwss.set_bandwidth(core_dc, context, core_dc->ctx->dce_version != DCN_VERSION_1_0);
1089 #else
1090         core_dc->hwss.set_bandwidth(core_dc, context, true);
1091 #endif
1092         return true;
1093 }
1094
1095 bool dc_commit_surfaces_to_stream(
1096                 struct dc *dc,
1097                 const struct dc_surface **new_surfaces,
1098                 uint8_t new_surface_count,
1099                 const struct dc_stream *dc_stream)
1100 {
1101         struct dc_surface_update updates[MAX_SURFACES];
1102         struct dc_flip_addrs flip_addr[MAX_SURFACES];
1103         struct dc_plane_info plane_info[MAX_SURFACES];
1104         struct dc_scaling_info scaling_info[MAX_SURFACES];
1105         int i;
1106         struct dc_stream_update *stream_update =
1107                         dm_alloc(sizeof(struct dc_stream_update));
1108
1109         if (!stream_update) {
1110                 BREAK_TO_DEBUGGER();
1111                 return false;
1112         }
1113
1114         memset(updates, 0, sizeof(updates));
1115         memset(flip_addr, 0, sizeof(flip_addr));
1116         memset(plane_info, 0, sizeof(plane_info));
1117         memset(scaling_info, 0, sizeof(scaling_info));
1118
1119         stream_update->src = dc_stream->src;
1120         stream_update->dst = dc_stream->dst;
1121         stream_update->out_transfer_func = dc_stream->out_transfer_func;
1122
1123         for (i = 0; i < new_surface_count; i++) {
1124                 updates[i].surface = new_surfaces[i];
1125                 updates[i].gamma =
1126                         (struct dc_gamma *)new_surfaces[i]->gamma_correction;
1127                 updates[i].in_transfer_func = new_surfaces[i]->in_transfer_func;
1128                 flip_addr[i].address = new_surfaces[i]->address;
1129                 flip_addr[i].flip_immediate = new_surfaces[i]->flip_immediate;
1130                 plane_info[i].color_space = new_surfaces[i]->color_space;
1131                 plane_info[i].format = new_surfaces[i]->format;
1132                 plane_info[i].plane_size = new_surfaces[i]->plane_size;
1133                 plane_info[i].rotation = new_surfaces[i]->rotation;
1134                 plane_info[i].horizontal_mirror = new_surfaces[i]->horizontal_mirror;
1135                 plane_info[i].stereo_format = new_surfaces[i]->stereo_format;
1136                 plane_info[i].tiling_info = new_surfaces[i]->tiling_info;
1137                 plane_info[i].visible = new_surfaces[i]->visible;
1138                 plane_info[i].per_pixel_alpha = new_surfaces[i]->per_pixel_alpha;
1139                 plane_info[i].dcc = new_surfaces[i]->dcc;
1140                 scaling_info[i].scaling_quality = new_surfaces[i]->scaling_quality;
1141                 scaling_info[i].src_rect = new_surfaces[i]->src_rect;
1142                 scaling_info[i].dst_rect = new_surfaces[i]->dst_rect;
1143                 scaling_info[i].clip_rect = new_surfaces[i]->clip_rect;
1144
1145                 updates[i].flip_addr = &flip_addr[i];
1146                 updates[i].plane_info = &plane_info[i];
1147                 updates[i].scaling_info = &scaling_info[i];
1148         }
1149
1150         dc_update_surfaces_and_stream(
1151                         dc,
1152                         updates,
1153                         new_surface_count,
1154                         dc_stream, stream_update);
1155
1156         dc_post_update_surfaces_to_stream(dc);
1157
1158         dm_free(stream_update);
1159         return true;
1160 }
1161
1162 void dc_retain_validate_context(struct validate_context *context)
1163 {
1164         ASSERT(context->ref_count > 0);
1165         ++context->ref_count;
1166 }
1167
1168 void dc_release_validate_context(struct validate_context *context)
1169 {
1170         ASSERT(context->ref_count > 0);
1171         --context->ref_count;
1172
1173         if (context->ref_count == 0) {
1174                 dc_resource_validate_ctx_destruct(context);
1175                 dm_free(context);
1176         }
1177 }
1178
1179 static bool is_surface_in_context(
1180                 const struct validate_context *context,
1181                 const struct dc_surface *surface)
1182 {
1183         int j;
1184
1185         for (j = 0; j < MAX_PIPES; j++) {
1186                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1187
1188                 if (surface == &pipe_ctx->surface->public) {
1189                         return true;
1190                 }
1191         }
1192
1193         return false;
1194 }
1195
1196 static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
1197 {
1198         switch (format) {
1199         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
1200         case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
1201                 return 12;
1202         case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
1203         case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
1204         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
1205         case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
1206                 return 16;
1207         case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
1208         case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
1209         case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
1210         case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
1211                 return 32;
1212         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1213         case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1214         case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1215                 return 64;
1216         default:
1217                 ASSERT_CRITICAL(false);
1218                 return -1;
1219         }
1220 }
1221
1222 static enum surface_update_type get_plane_info_update_type(
1223                 const struct dc_surface_update *u,
1224                 int surface_index)
1225 {
1226         struct dc_plane_info temp_plane_info;
1227         memset(&temp_plane_info, 0, sizeof(temp_plane_info));
1228
1229         if (!u->plane_info)
1230                 return UPDATE_TYPE_FAST;
1231
1232         temp_plane_info = *u->plane_info;
1233
1234         /* Copy all parameters that will cause a full update
1235          * from current surface, the rest of the parameters
1236          * from provided plane configuration.
1237          * Perform memory compare and special validation
1238          * for those that can cause fast/medium updates
1239          */
1240
1241         /* Full update parameters */
1242         temp_plane_info.color_space = u->surface->color_space;
1243         temp_plane_info.dcc = u->surface->dcc;
1244         temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
1245         temp_plane_info.plane_size = u->surface->plane_size;
1246         temp_plane_info.rotation = u->surface->rotation;
1247         temp_plane_info.stereo_format = u->surface->stereo_format;
1248         temp_plane_info.tiling_info = u->surface->tiling_info;
1249
1250         if (surface_index == 0)
1251                 temp_plane_info.visible = u->plane_info->visible;
1252         else
1253                 temp_plane_info.visible = u->surface->visible;
1254
1255         if (memcmp(u->plane_info, &temp_plane_info,
1256                         sizeof(struct dc_plane_info)) != 0)
1257                 return UPDATE_TYPE_FULL;
1258
1259         if (pixel_format_to_bpp(u->plane_info->format) !=
1260                         pixel_format_to_bpp(u->surface->format)) {
1261                 return UPDATE_TYPE_FULL;
1262         } else {
1263                 return UPDATE_TYPE_MED;
1264         }
1265 }
1266
1267 static enum surface_update_type  get_scaling_info_update_type(
1268                 const struct dc_surface_update *u)
1269 {
1270         if (!u->scaling_info)
1271                 return UPDATE_TYPE_FAST;
1272
1273         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
1274                         || u->scaling_info->src_rect.height != u->surface->src_rect.height
1275                         || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
1276                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
1277                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
1278                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
1279                 return UPDATE_TYPE_FULL;
1280
1281         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
1282                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
1283                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
1284                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
1285                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
1286                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
1287                 return UPDATE_TYPE_MED;
1288
1289         return UPDATE_TYPE_FAST;
1290 }
1291
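/*
 * Classify a single surface update by combining the plane-info and scaling
 * checks above. Updates to a surface not in the current context are always
 * FULL; transfer function or HDR metadata changes raise the result to at
 * least MED.
 */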
1292 static enum surface_update_type det_surface_update(
1293                 const struct core_dc *dc,
1294                 const struct dc_surface_update *u,
1295                 int surface_index)
1296 {
1297         const struct validate_context *context = dc->current_context;
1298         enum surface_update_type type = UPDATE_TYPE_FAST;
1299         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1300
1301         if (!is_surface_in_context(context, u->surface))
1302                 return UPDATE_TYPE_FULL;
1303
1304         type = get_plane_info_update_type(u, surface_index);
1305         if (overall_type < type)
1306                 overall_type = type;
1307
1308         type = get_scaling_info_update_type(u);
1309         if (overall_type < type)
1310                 overall_type = type;
1311
1312         if (u->in_transfer_func ||
1313                 u->hdr_static_metadata) {
1314                 if (overall_type < UPDATE_TYPE_MED)
1315                         overall_type = UPDATE_TYPE_MED;
1316         }
1317
1318         return overall_type;
1319 }
1320
1321 enum surface_update_type dc_check_update_surfaces_for_stream(
1322                 struct dc *dc,
1323                 struct dc_surface_update *updates,
1324                 int surface_count,
1325                 struct dc_stream_update *stream_update,
1326                 const struct dc_stream_status *stream_status)
1327 {
1328         struct core_dc *core_dc = DC_TO_CORE(dc);
1329         int i;
1330         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
1331
1332         if (stream_status == NULL || stream_status->surface_count != surface_count)
1333                 return UPDATE_TYPE_FULL;
1334
1335         if (stream_update)
1336                 return UPDATE_TYPE_FULL;
1337
1338         for (i = 0 ; i < surface_count; i++) {
1339                 enum surface_update_type type =
1340                                 det_surface_update(core_dc, &updates[i], i);
1341
1342                 if (type == UPDATE_TYPE_FULL)
1343                         return type;
1344
1345                 if (overall_type < type)
1346                         overall_type = type;
1347         }
1348
1349         return overall_type;
1350 }
1351
1352 enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
1353
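/*
 * Top-level surface/stream update path: apply stream-level src/dst/output
 * transfer function updates, determine the overall update type, build a new
 * validate_context for FULL updates, then copy the per-surface parameters
 * (flip address, scaling, plane info, gamma) into the core surfaces.
 */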
1354 void dc_update_surfaces_and_stream(struct dc *dc,
1355                 struct dc_surface_update *srf_updates, int surface_count,
1356                 const struct dc_stream *dc_stream,
1357                 struct dc_stream_update *stream_update)
1358 {
1359         struct core_dc *core_dc = DC_TO_CORE(dc);
1360         struct validate_context *context;
1361         int i, j;
1362         enum surface_update_type update_type;
1363         const struct dc_stream_status *stream_status;
1364         struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
1365         struct dc_context *dc_ctx = core_dc->ctx;
1366
1367         stream_status = dc_stream_get_status(dc_stream);
1368         ASSERT(stream_status);
1369         if (!stream_status)
1370                 return; /* Cannot commit surface to stream that is not committed */
1371
1372 #ifdef ENABLE_FBC
1373         if (srf_updates->flip_addr) {
1374                 if (srf_updates->flip_addr->address.grph.addr.low_part == 0)
1375                         ASSERT(0);
1376         }
1377 #endif
1378         context = core_dc->current_context;
1379
1380         /* update current stream with the new updates */
1381         if (stream_update) {
1382                 if ((stream_update->src.height != 0) &&
1383                                 (stream_update->src.width != 0))
1384                         stream->public.src = stream_update->src;
1385
1386                 if ((stream_update->dst.height != 0) &&
1387                                 (stream_update->dst.width != 0))
1388                         stream->public.dst = stream_update->dst;
1389
1390                 if (stream_update->out_transfer_func &&
1391                                 stream_update->out_transfer_func !=
1392                                                 dc_stream->out_transfer_func) {
1393                         if (dc_stream->out_transfer_func != NULL)
1394                                 dc_transfer_func_release(dc_stream->out_transfer_func);
1395                         dc_transfer_func_retain(stream_update->out_transfer_func);
1396                         stream->public.out_transfer_func =
1397                                 stream_update->out_transfer_func;
1398                 }
1399         }
1400
1401         /* do not perform surface update if surface has invalid dimensions
1402          * (all zero) and no scaling_info is provided
1403          */
1404         if (surface_count > 0 &&
1405                         srf_updates->surface->src_rect.width == 0 &&
1406                         srf_updates->surface->src_rect.height == 0 &&
1407                         srf_updates->surface->dst_rect.width == 0 &&
1408                         srf_updates->surface->dst_rect.height == 0 &&
1409                         !srf_updates->scaling_info) {
1410                 ASSERT(false);
1411                 return;
1412         }
1413
1414         update_type = dc_check_update_surfaces_for_stream(
1415                         dc, srf_updates, surface_count, stream_update, stream_status);
1416
1417         if (update_type >= update_surface_trace_level)
1418                 update_surface_trace(dc, srf_updates, surface_count);
1419
1420         if (update_type >= UPDATE_TYPE_FULL) {
1421                 const struct dc_surface *new_surfaces[MAX_SURFACES] = {0};
1422
1423                 for (i = 0; i < surface_count; i++)
1424                         new_surfaces[i] = srf_updates[i].surface;
1425
1426                 /* initialize scratch memory for building context */
1427                 context = dm_alloc(sizeof(*context));
1428                 if (context == NULL)
1429                                 goto context_alloc_fail;
1430
1431                 ++context->ref_count;
1432
1433                 dc_resource_validate_ctx_copy_construct(
1434                                 core_dc->current_context, context);
1435
1436                 /* add surface to context */
1437                 if (!resource_attach_surfaces_to_context(
1438                                 new_surfaces, surface_count, dc_stream,
1439                                 context, core_dc->res_pool)) {
1440                         BREAK_TO_DEBUGGER();
1441                         goto fail;
1442                 }
1443         }
1444
1445         /* save update parameters into surface */
1446         for (i = 0; i < surface_count; i++) {
1447                 struct core_surface *surface =
1448                                 DC_SURFACE_TO_CORE(srf_updates[i].surface);
1449
1450                 if (srf_updates[i].flip_addr) {
1451                         surface->public.address = srf_updates[i].flip_addr->address;
1452                         surface->public.flip_immediate =
1453                                         srf_updates[i].flip_addr->flip_immediate;
1454                 }
1455
1456                 if (srf_updates[i].scaling_info) {
1457                         surface->public.scaling_quality =
1458                                         srf_updates[i].scaling_info->scaling_quality;
1459                         surface->public.dst_rect =
1460                                         srf_updates[i].scaling_info->dst_rect;
1461                         surface->public.src_rect =
1462                                         srf_updates[i].scaling_info->src_rect;
1463                         surface->public.clip_rect =
1464                                         srf_updates[i].scaling_info->clip_rect;
1465                 }
1466
1467                 if (srf_updates[i].plane_info) {
1468                         surface->public.color_space =
1469                                         srf_updates[i].plane_info->color_space;
1470                         surface->public.format =
1471                                         srf_updates[i].plane_info->format;
1472                         surface->public.plane_size =
1473                                         srf_updates[i].plane_info->plane_size;
1474                         surface->public.rotation =
1475                                         srf_updates[i].plane_info->rotation;
1476                         surface->public.horizontal_mirror =
1477                                         srf_updates[i].plane_info->horizontal_mirror;
1478                         surface->public.stereo_format =
1479                                         srf_updates[i].plane_info->stereo_format;
1480                         surface->public.tiling_info =
1481                                         srf_updates[i].plane_info->tiling_info;
1482                         surface->public.visible =
1483                                         srf_updates[i].plane_info->visible;
1484                         surface->public.per_pixel_alpha =
1485                                         srf_updates[i].plane_info->per_pixel_alpha;
1486                         surface->public.dcc =
1487                                         srf_updates[i].plane_info->dcc;
1488                 }
1489
1490                 if (update_type >= UPDATE_TYPE_MED) {
1491                         for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1492                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1493
1494                                 if (pipe_ctx->surface != surface)
1495                                         continue;
1496
1497                                 resource_build_scaling_params(pipe_ctx);
1498                         }
1499                 }
1500
1501                 if (srf_updates[i].gamma &&
1502                         srf_updates[i].gamma != surface->public.gamma_correction) {
1503                         if (surface->public.gamma_correction != NULL)
1504                                 dc_gamma_release(
1505                                                 &surface->public.gamma_correction);
1506
1507                         dc_gamma_retain(srf_updates[i].gamma);
1508                         surface->public.gamma_correction =
1509                                                 srf_updates[i].gamma;
1510                 }
1511
1512                 if (srf_updates[i].in_transfer_func &&
1513                         srf_updates[i].in_transfer_func != surface->public.in_transfer_func) {
1514                         if (surface->public.in_transfer_func != NULL)
1515                                 dc_transfer_func_release(
1516                                                 surface->public.in_transfer_func);
1518
1519                         dc_transfer_func_retain(
1520                                         srf_updates[i].in_transfer_func);
1521                         surface->public.in_transfer_func =
1522                                         srf_updates[i].in_transfer_func;
1523                 }
1524
1525                 if (srf_updates[i].hdr_static_metadata)
1526                         surface->public.hdr_static_ctx =
1527                                 *(srf_updates[i].hdr_static_metadata);
1528         }
1529
1530         if (update_type == UPDATE_TYPE_FULL) {
1531                 if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
1532                         BREAK_TO_DEBUGGER();
1533                         goto fail;
1534                 } else {
1535                         core_dc->hwss.set_bandwidth(core_dc, context, false);
1536                         context_clock_trace(dc, context);
1537                 }
1538         }
1539
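             /* No surfaces in this update: apply the context without a specific surface */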
1540         if (surface_count == 0)
1541                 core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
1542
1543         /* Lock pipes for the provided surfaces, or all active pipes on a full update */
1544         for (i = 0; i < surface_count; i++) {
1545                 struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
1546
1547                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1548                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1549
1550                         if (update_type != UPDATE_TYPE_FULL && pipe_ctx->surface != surface)
1551                                 continue;
1552                         if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1553                                 continue;
1554
1555                         if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1556                                 core_dc->hwss.pipe_control_lock(
1557                                                 core_dc,
1558                                                 pipe_ctx,
1559                                                 true);
1560                         }
1561                 }
1562                 if (update_type == UPDATE_TYPE_FULL)
1563                         break;
1564         }
1565
1566         /* Full front-end (FE) update: re-apply the context to every pipe that owns a surface */
1567         for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1568                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1569                 struct pipe_ctx *cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
1570                 bool is_new_pipe_surface = cur_pipe_ctx->surface != pipe_ctx->surface;
1571                 struct dc_cursor_position position = { 0 };
1572
1573                 if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->surface)
1574                         continue;
1575
1576                 if (!pipe_ctx->top_pipe)
1577                         core_dc->hwss.apply_ctx_for_surface(
1578                                         core_dc, pipe_ctx->surface, context);
1579
1580                 /* TODO: this is a workaround for switching from MPO to pipe split */
1581                 dc_stream_set_cursor_position(&pipe_ctx->stream->public, &position);
1582
1583                 if (is_new_pipe_surface) {
1584                         core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1585                         core_dc->hwss.set_input_transfer_func(
1586                                         pipe_ctx, pipe_ctx->surface);
1587                         core_dc->hwss.set_output_transfer_func(
1588                                         pipe_ctx, pipe_ctx->stream);
1589                 }
1590         }
1591
1592         if (update_type > UPDATE_TYPE_FAST)
1593                 context_timing_trace(dc, &context->res_ctx);
1594
1595         /* Perform the requested updates on every pipe that uses an updated surface */
1596         for (i = 0; i < surface_count; i++) {
1597                 struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
1598
1599                 if (update_type == UPDATE_TYPE_MED)
1600                         core_dc->hwss.apply_ctx_for_surface(
1601                                         core_dc, surface, context);
1602
1603                 for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
1604                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
1605
1606                         if (pipe_ctx->surface != surface)
1607                                 continue;
1608
1609                         if (srf_updates[i].flip_addr)
1610                                 core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
1611
1612                         if (update_type == UPDATE_TYPE_FAST)
1613                                 continue;
1614
1615                         if (srf_updates[i].in_transfer_func)
1616                                 core_dc->hwss.set_input_transfer_func(
1617                                                 pipe_ctx, pipe_ctx->surface);
1618
1619                         if (stream_update != NULL &&
1620                                         stream_update->out_transfer_func != NULL) {
1621                                 core_dc->hwss.set_output_transfer_func(
1622                                                 pipe_ctx, pipe_ctx->stream);
1623                         }
1624
1625                         if (srf_updates[i].hdr_static_metadata) {
1626                                 resource_build_info_frame(pipe_ctx);
1627                                 core_dc->hwss.update_info_frame(pipe_ctx);
1628                         }
1629                 }
1630         }
1631
1632         /* Unlock pipes, walking them in reverse order */
1633         for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
1634                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1635
1636                 for (j = 0; j < surface_count; j++) {
1637                         if (update_type != UPDATE_TYPE_FULL &&
1638                                         srf_updates[j].surface != &pipe_ctx->surface->public)
1639                                 continue;
1640                         if (!pipe_ctx->surface || pipe_ctx->top_pipe)
1641                                 continue;
1642
1643                         if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
1644                                 core_dc->hwss.pipe_control_lock(
1645                                                 core_dc,
1646                                                 pipe_ctx,
1647                                                 false);
1648                         }
1649                         break;
1650                 }
1651         }
1652
1653         if (core_dc->current_context != context) {
1654                 dc_release_validate_context(core_dc->current_context);
1655                 core_dc->current_context = context;
1656         }
1657         return;
1658
1659 fail:
1660         dc_release_validate_context(context);
1661
1662 context_alloc_fail:
1663         DC_ERROR("Failed to allocate new validate context!\n");
1664 }
1665
1666 uint8_t dc_get_current_stream_count(const struct dc *dc)
1667 {
1668         struct core_dc *core_dc = DC_TO_CORE(dc);
1669         return core_dc->current_context->stream_count;
1670 }
1671
1672 struct dc_stream *dc_get_stream_at_index(const struct dc *dc, uint8_t i)
1673 {
1674         struct core_dc *core_dc = DC_TO_CORE(dc);
1675         if (i < core_dc->current_context->stream_count)
1676                 return &(core_dc->current_context->streams[i]->public);
1677         return NULL;
1678 }
1679
1680 const struct dc_link *dc_get_link_at_index(const struct dc *dc, uint32_t link_index)
1681 {
1682         struct core_dc *core_dc = DC_TO_CORE(dc);
1683         return &core_dc->links[link_index]->public;
1684 }
1685
1686 const struct graphics_object_id dc_get_link_id_at_index(
1687         struct dc *dc, uint32_t link_index)
1688 {
1689         struct core_dc *core_dc = DC_TO_CORE(dc);
1690         return core_dc->links[link_index]->link_id;
1691 }
1692
1693 enum dc_irq_source dc_get_hpd_irq_source_at_index(
1694         struct dc *dc, uint32_t link_index)
1695 {
1696         struct core_dc *core_dc = DC_TO_CORE(dc);
1697         return core_dc->links[link_index]->public.irq_source_hpd;
1698 }
1699
1700 const struct audio **dc_get_audios(struct dc *dc)
1701 {
1702         struct core_dc *core_dc = DC_TO_CORE(dc);
1703         return (const struct audio **)core_dc->res_pool->audios;
1704 }
1705
1706 enum dc_irq_source dc_interrupt_to_irq_source(
1707                 struct dc *dc,
1708                 uint32_t src_id,
1709                 uint32_t ext_id)
1710 {
1711         struct core_dc *core_dc = DC_TO_CORE(dc);
1712         return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
1713 }
1714
1715 void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
1716 {
1717         struct core_dc *core_dc;
1718
1719         if (dc == NULL)
1720                 return;
1721         core_dc = DC_TO_CORE(dc);
1722
1723         dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
1724 }
1725
1726 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
1727 {
1728         struct core_dc *core_dc = DC_TO_CORE(dc);
1729         dal_irq_service_ack(core_dc->res_pool->irqs, src);
1730 }
1731
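     /* D0 re-initializes the hardware; any other ACPI power state powers the
      * hardware down and clears the cached current context, preserving only
      * its reference count.
      */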
1732 void dc_set_power_state(
1733         struct dc *dc,
1734         enum dc_acpi_cm_power_state power_state)
1735 {
1736         struct core_dc *core_dc = DC_TO_CORE(dc);
1737         int ref_count;
1738
1739         switch (power_state) {
1740         case DC_ACPI_CM_POWER_STATE_D0:
1741                 core_dc->hwss.init_hw(core_dc);
1742                 break;
1743         default:
1745                 core_dc->hwss.power_down(core_dc);
1746
1747                 /* Zero out the current context so that on resume we start with
1748                  * clean state, and dc hw programming optimizations will not
1749                  * cause any trouble.
1750                  */
1751
1752                 /* Preserve refcount */
1753                 ref_count = core_dc->current_context->ref_count;
1754                 dc_resource_validate_ctx_destruct(core_dc->current_context);
1755                 memset(core_dc->current_context, 0,
1756                                 sizeof(*core_dc->current_context));
1757                 core_dc->current_context->ref_count = ref_count;
1758
1759                 break;
1760         }
1762 }
1763
1764 void dc_resume(const struct dc *dc)
1765 {
1766         struct core_dc *core_dc = DC_TO_CORE(dc);
1767
1768         uint32_t i;
1769
1770         for (i = 0; i < core_dc->link_count; i++)
1771                 core_link_resume(core_dc->links[i]);
1772 }
1773
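     /* Read 'size' bytes of DPCD register space starting at 'address' over the
      * DP AUX channel of the link at 'link_index'. Returns true on success.
      */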
1774 bool dc_read_aux_dpcd(
1775                 struct dc *dc,
1776                 uint32_t link_index,
1777                 uint32_t address,
1778                 uint8_t *data,
1779                 uint32_t size)
1780 {
1781         struct core_dc *core_dc = DC_TO_CORE(dc);
1782
1783         struct core_link *link = core_dc->links[link_index];
1784         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1785                         link->public.ddc,
1786                         false,
1787                         I2C_MOT_UNDEF,
1788                         address,
1789                         data,
1790                         size);
1791         return r == DDC_RESULT_SUCESSFULL;
1792 }
1793
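     /* Write 'size' bytes of DPCD register space starting at 'address' over the
      * DP AUX channel of the link at 'link_index'. Returns true on success.
      */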
1794 bool dc_write_aux_dpcd(
1795                 struct dc *dc,
1796                 uint32_t link_index,
1797                 uint32_t address,
1798                 const uint8_t *data,
1799                 uint32_t size)
1800 {
1801         struct core_dc *core_dc = DC_TO_CORE(dc);
1802         struct core_link *link = core_dc->links[link_index];
1803
1804         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1805                         link->public.ddc,
1806                         false,
1807                         I2C_MOT_UNDEF,
1808                         address,
1809                         data,
1810                         size);
1811         return r == DDC_RESULT_SUCESSFULL;
1812 }
1813
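     /* I2C-over-AUX read on the link's DDC channel; unlike dc_read_aux_dpcd()
      * this passes 'true' (I2C) and an explicit MOT (middle-of-transaction)
      * mode to the DDC service.
      */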
1814 bool dc_read_aux_i2c(
1815                 struct dc *dc,
1816                 uint32_t link_index,
1817                 enum i2c_mot_mode mot,
1818                 uint32_t address,
1819                 uint8_t *data,
1820                 uint32_t size)
1821 {
1822         struct core_dc *core_dc = DC_TO_CORE(dc);
1823
1824         struct core_link *link = core_dc->links[link_index];
1825         enum ddc_result r = dal_ddc_service_read_dpcd_data(
1826                         link->public.ddc,
1827                         true,
1828                         mot,
1829                         address,
1830                         data,
1831                         size);
1832         return r == DDC_RESULT_SUCESSFULL;
1833 }
1834
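     /* I2C-over-AUX write counterpart of dc_read_aux_i2c(). */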
1835 bool dc_write_aux_i2c(
1836                 struct dc *dc,
1837                 uint32_t link_index,
1838                 enum i2c_mot_mode mot,
1839                 uint32_t address,
1840                 const uint8_t *data,
1841                 uint32_t size)
1842 {
1843         struct core_dc *core_dc = DC_TO_CORE(dc);
1844         struct core_link *link = core_dc->links[link_index];
1845
1846         enum ddc_result r = dal_ddc_service_write_dpcd_data(
1847                         link->public.ddc,
1848                         true,
1849                         mot,
1850                         address,
1851                         data,
1852                         size);
1853         return r == DDC_RESULT_SUCESSFULL;
1854 }
1855
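     /* Combined DDC (I2C) transaction: write 'write_size' bytes to the device
      * at 'address', then read 'read_size' bytes back.
      */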
1856 bool dc_query_ddc_data(
1857                 struct dc *dc,
1858                 uint32_t link_index,
1859                 uint32_t address,
1860                 uint8_t *write_buf,
1861                 uint32_t write_size,
1862                 uint8_t *read_buf,
1863                 uint32_t read_size)
1864 {
1865         struct core_dc *core_dc = DC_TO_CORE(dc);
1866
1867         struct core_link *link = core_dc->links[link_index];
1868
1869         bool result = dal_ddc_service_query_ddc_data(
1870                         link->public.ddc,
1871                         address,
1872                         write_buf,
1873                         write_size,
1874                         read_buf,
1875                         read_size);
1876
1877         return result;
1878 }
1879
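     /* Submit an arbitrary I2C command on the DDC pin of the link at 'link_index'. */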
1880 bool dc_submit_i2c(
1881                 struct dc *dc,
1882                 uint32_t link_index,
1883                 struct i2c_command *cmd)
1884 {
1885         struct core_dc *core_dc = DC_TO_CORE(dc);
1886
1887         struct core_link *link = core_dc->links[link_index];
1888         struct ddc_service *ddc = link->public.ddc;
1889
1890         return dal_i2caux_submit_i2c_command(
1891                 ddc->ctx->i2caux,
1892                 ddc->ddc_pin,
1893                 cmd);
1894 }
1895
1896 static bool link_add_remote_sink_helper(struct core_link *core_link, struct dc_sink *sink)
1897 {
1898         struct dc_link *dc_link = &core_link->public;
1899
1900         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
1901                 BREAK_TO_DEBUGGER();
1902                 return false;
1903         }
1904
1905         dc_sink_retain(sink);
1906
1907         dc_link->remote_sinks[dc_link->sink_count] = sink;
1908         dc_link->sink_count++;
1909
1910         return true;
1911 }
1912
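     /* Create a sink from a raw EDID buffer and register it as a remote sink on
      * the link (used for MST displays, for example). Returns NULL on failure.
      */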
1913 struct dc_sink *dc_link_add_remote_sink(
1914                 const struct dc_link *link,
1915                 const uint8_t *edid,
1916                 int len,
1917                 struct dc_sink_init_data *init_data)
1918 {
1919         struct dc_sink *dc_sink;
1920         enum dc_edid_status edid_status;
1921         struct core_link *core_link = DC_LINK_TO_LINK(link);
1922
1923         if (len > MAX_EDID_BUFFER_SIZE) {
1924                 dm_error("Max EDID buffer size breached!\n");
1925                 return NULL;
1926         }
1927
1928         if (!init_data) {
1929                 BREAK_TO_DEBUGGER();
1930                 return NULL;
1931         }
1932
1933         if (!init_data->link) {
1934                 BREAK_TO_DEBUGGER();
1935                 return NULL;
1936         }
1937
1938         dc_sink = dc_sink_create(init_data);
1939
1940         if (!dc_sink)
1941                 return NULL;
1942
1943         memmove(dc_sink->dc_edid.raw_edid, edid, len);
1944         dc_sink->dc_edid.length = len;
1945
1946         if (!link_add_remote_sink_helper(
1947                         core_link,
1948                         dc_sink))
1949                 goto fail_add_sink;
1950
1951         edid_status = dm_helpers_parse_edid_caps(
1952                         core_link->ctx,
1953                         &dc_sink->dc_edid,
1954                         &dc_sink->edid_caps);
1955
1956         if (edid_status != EDID_OK)
1957                 goto fail;
1958
1959         return dc_sink;
1960 fail:
1961         dc_link_remove_remote_sink(link, dc_sink);
1962 fail_add_sink:
1963         dc_sink_release(dc_sink);
1964         return NULL;
1965 }
1966
1967 void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
1968 {
1969         struct core_link *core_link = DC_LINK_TO_LINK(link);
1970         struct dc_link *dc_link = &core_link->public;
1971
1972         dc_link->local_sink = sink;
1973
1974         if (sink == NULL) {
1975                 dc_link->type = dc_connection_none;
1976         } else {
1977                 dc_link->type = dc_connection_single;
1978         }
1979 }
1980
1981 void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
1982 {
1983         int i;
1984         struct core_link *core_link = DC_LINK_TO_LINK(link);
1985         struct dc_link *dc_link = &core_link->public;
1986
1987         if (!link->sink_count) {
1988                 BREAK_TO_DEBUGGER();
1989                 return;
1990         }
1991
1992         for (i = 0; i < dc_link->sink_count; i++) {
1993                 if (dc_link->remote_sinks[i] == sink) {
1994                         dc_sink_release(sink);
1995                         dc_link->remote_sinks[i] = NULL;
1996
1997                         /* shrink array to remove empty place */
1998                         while (i < dc_link->sink_count - 1) {
1999                                 dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
2000                                 i++;
2001                         }
2002                         dc_link->remote_sinks[i] = NULL;
2003                         dc_link->sink_count--;
2004                         return;
2005                 }
2006         }
2007 }
2008
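     /* Program DCHUB (display controller hub) settings through the first
      * available mem_input in the resource pool.
      */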
2009 bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
2010 {
2011         int i;
2012         struct core_dc *core_dc = DC_TO_CORE(dc);
2013         struct mem_input *mi = NULL;
2014
2015         for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
2016                 if (core_dc->res_pool->mis[i] != NULL) {
2017                         mi = core_dc->res_pool->mis[i];
2018                         break;
2019                 }
2020         }
2021         if (mi == NULL) {
2022                 dm_error("no mem_input!\n");
2023                 return false;
2024         }
2025
2026         if (mi->funcs->mem_input_update_dchub)
2027                 mi->funcs->mem_input_update_dchub(mi, dh_data);
2028         else
2029                 ASSERT(mi->funcs->mem_input_update_dchub);
2030
2032         return true;
2034 }
2035
2036 void dc_log_hw_state(struct dc *dc)
2037 {
2038         struct core_dc *core_dc = DC_TO_CORE(dc);
2039
2040         if (core_dc->hwss.log_hw_state)
2041                 core_dc->hwss.log_hw_state(core_dc);
2042 }