/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"
#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#define CTX \
        dc->ctx

#define DC_LOGGER \
        dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";

/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */

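/*
 * Illustrative sketch (not part of the driver): given the relationships
 * above, a DM-side caller holding a struct dc can walk the per-connector
 * links to find an embedded panel.  The helper name find_internal_link()
 * is hypothetical; the fields it reads (link_count, links[],
 * connector_signal, is_internal_display) are the real ones used below.
 *
 *   static struct dc_link *find_internal_link(struct dc *dc)
 *   {
 *           int i;
 *
 *           for (i = 0; i < dc->link_count; i++)
 *                   if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP ||
 *                                   dc->links[i]->is_internal_display)
 *                           return dc->links[i];
 *
 *           return NULL;
 *   }
 */
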
/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
        if (new > *original)
                *original = new;
}

static void destroy_links(struct dc *dc)
{
        uint32_t i;

        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i] != NULL)
                        link_destroy(&dc->links[i]);
        }
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
        int i;
        uint32_t count = 0;

        for (i = 0; i < num_links; i++) {
                if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
                                links[i]->is_internal_display)
                        count++;
        }

        return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
        uint8_t i;
        uint8_t seamless_boot_stream_count = 0;

        for (i = 0; i < ctx->stream_count; i++)
                if (ctx->streams[i]->apply_seamless_boot_optimization)
                        seamless_boot_stream_count++;

        return seamless_boot_stream_count;
}

static bool create_links(
                struct dc *dc,
                uint32_t num_virtual_links)
{
        int i;
        int connectors_num;
        struct dc_bios *bios = dc->ctx->dc_bios;

        dc->link_count = 0;

        connectors_num = bios->funcs->get_connectors_number(bios);

        DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

        if (connectors_num > ENUM_ID_COUNT) {
                dm_error(
                        "DC: Number of connectors %d exceeds maximum of %d!\n",
                        connectors_num,
                        ENUM_ID_COUNT);
                return false;
        }

        dm_output_to_console(
                "DC: %s: connectors_num: physical:%d, virtual:%d\n",
                __func__,
                connectors_num,
                num_virtual_links);

        for (i = 0; i < connectors_num; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

                link_init_params.ctx = dc->ctx;
                /* next BIOS object table connector */
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link = link_create(&link_init_params);

                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        DC_LOG_DC("BIOS object table - end");

        /* Create a link for each usb4 dpia port */
        for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
                struct link_init_data link_init_params = {0};
                struct dc_link *link;

                link_init_params.ctx = dc->ctx;
                link_init_params.connector_index = i;
                link_init_params.link_index = dc->link_count;
                link_init_params.dc = dc;
                link_init_params.is_dpia_link = true;

                link = link_create(&link_init_params);
                if (link) {
                        dc->links[dc->link_count] = link;
                        link->dc = dc;
                        ++dc->link_count;
                }
        }

        for (i = 0; i < num_virtual_links; i++) {
                struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
                struct encoder_init_data enc_init = {0};

                if (link == NULL) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_index = dc->link_count;
                dc->links[dc->link_count] = link;
                dc->link_count++;

                link->ctx = dc->ctx;
                link->dc = dc;
                link->connector_signal = SIGNAL_TYPE_VIRTUAL;
                link->link_id.type = OBJECT_TYPE_CONNECTOR;
                link->link_id.id = CONNECTOR_ID_VIRTUAL;
                link->link_id.enum_id = ENUM_ID_1;
                link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

                if (!link->link_enc) {
                        BREAK_TO_DEBUGGER();
                        goto failed_alloc;
                }

                link->link_status.dpcd_caps = &link->dpcd_caps;

                enc_init.ctx = dc->ctx;
                enc_init.channel = CHANNEL_ID_UNKNOWN;
                enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
                enc_init.transmitter = TRANSMITTER_UNKNOWN;
                enc_init.connector = link->link_id;
                enc_init.encoder.type = OBJECT_TYPE_ENCODER;
                enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
                enc_init.encoder.enum_id = ENUM_ID_1;
                virtual_link_encoder_construct(link->link_enc, &enc_init);
        }

        dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

        return true;

failed_alloc:
        return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
        bool res = true;
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return res;

        /* Create as many link encoder objects as the platform supports. DPIA
         * endpoints can be programmably mapped to any DIG.
         */
        if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
                for (i = 0; i < num_dig_link_enc; i++) {
                        struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                        if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
                                link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
                                                (enum engine_id)(ENGINE_ID_DIGA + i));
                                if (link_enc) {
                                        dc->res_pool->link_encoders[i] = link_enc;
                                        dc->res_pool->dig_link_enc_count++;
                                } else {
                                        res = false;
                                }
                        }
                }
        }

        return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
        unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
        unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
        int i;

        /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
         * link encoders and physical display endpoints and does not require
         * additional link encoder objects.
         */
        if (num_usb4_dpia == 0)
                return;

        for (i = 0; i < num_dig_link_enc; i++) {
                struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

                if (link_enc) {
                        link_enc->funcs->destroy(&link_enc);
                        dc->res_pool->link_encoders[i] = NULL;
                        dc->res_pool->dig_link_enc_count--;
                }
        }
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
        return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
        kfree(*perf_trace);
        *perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - Adjust DRR for a stream.
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
 * Rate), a power-saving feature that reduces the panel refresh rate
 * while the screen is static.
 *
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
{
        int i;
        bool ret = false;

        stream->adjust.v_total_max = adjust->v_total_max;
        stream->adjust.v_total_mid = adjust->v_total_mid;
        stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
        stream->adjust.v_total_min = adjust->v_total_min;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
                                        *adjust);

                        ret = true;
                }
        }
        return ret;
}

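/*
 * Illustrative usage (hypothetical caller, not part of the driver): a DRR
 * request is expressed as a dc_crtc_timing_adjust carrying the VTOTAL range
 * to honor.  The values below are placeholders; real callers derive them
 * from the panel's supported refresh-rate range.
 *
 *   struct dc_crtc_timing_adjust adjust = { 0 };
 *
 *   adjust.v_total_min = stream->timing.v_total;      // fastest refresh
 *   adjust.v_total_max = 2 * stream->timing.v_total;  // slowest refresh
 *
 *   if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *           DC_LOG_WARNING("stream has no pipe; DRR not applied\n");
 */
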
/**
 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * @dc: dc reference
 * @stream: Initial dc stream state
 * @refresh_rate: Receives the last VTOTAL used by DRR
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
                struct dc_stream_state *stream,
                uint32_t *refresh_rate)
{
        bool status = false;
        int i = 0;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.tg) {
                        /* Only execute if a function pointer has been defined for
                         * the DC version in question
                         */
                        if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
                                pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

                                status = true;

                                break;
                        }
                }
        }

        return status;
}

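/*
 * Illustrative usage (hypothetical caller): read back the last VTOTAL that
 * DRR actually programmed.  Despite the parameter name, the value returned
 * through refresh_rate is a VTOTAL in lines, not a rate in Hz.
 *
 *   uint32_t last_vtotal = 0;
 *
 *   if (dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal))
 *           DC_LOG_DC("last used DRR VTOTAL: %u\n", last_vtotal);
 */
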
bool dc_stream_get_crtc_position(struct dc *dc,
                struct dc_stream_state **streams, int num_streams,
                unsigned int *v_pos, unsigned int *nom_v_pos)
{
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
        int i;
        bool ret = false;
        struct crtc_position position;

        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe =
                                &dc->current_state->res_ctx.pipe_ctx[i];

                if (pipe->stream == stream && pipe->stream_res.stream_enc) {
                        dc->hwss.get_position(&pipe, 1, &position);

                        *v_pos = position.vertical_count;
                        *nom_v_pos = position.nominal_vcount;
                        ret = true;
                }
        }
        return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window)
{
        int i;
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct pipe_ctx *pipe;
        struct crc_region tmp_win, *crc_win;
        struct otg_phy_mux mapping_tmp, *mux_mapping;

        /* crc window can't be null */
        if (!crc_window)
                return false;

        if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
                crc_win = &tmp_win;
                mux_mapping = &mapping_tmp;
                /* set crc window */
                tmp_win.x_start = crc_window->windowa_x_start;
                tmp_win.y_start = crc_window->windowa_y_start;
                tmp_win.x_end = crc_window->windowa_x_end;
                tmp_win.y_end = crc_window->windowa_y_end;

                for (i = 0; i < MAX_PIPES; i++) {
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                                break;
                }

                /* Stream not found */
                if (i == MAX_PIPES)
                        return false;

                /* set mux routing info */
                mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
                mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

                dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
        } else {
                DC_LOG_DC("dmcu is not initialized");
                return false;
        }

        return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        struct dmcu *dmcu = dc->res_pool->dmcu;
        struct pipe_ctx *pipe;
        struct otg_phy_mux mapping_tmp, *mux_mapping;

        if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
                mux_mapping = &mapping_tmp;

                for (i = 0; i < MAX_PIPES; i++) {
                        pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                                break;
                }

                /* Stream not found */
                if (i == MAX_PIPES)
                        return false;

                /* set mux routing info */
                mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
                mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

                dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
        } else {
                DC_LOG_DC("dmcu is not initialized");
                return false;
        }

        return true;
}
#endif

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the crc.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
                             struct crc_params *crc_window, bool enable, bool continuous)
{
        int i;
        struct pipe_ctx *pipe;
        struct crc_params param;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        /* By default, capture the full frame */
        param.windowa_x_start = 0;
        param.windowa_y_start = 0;
        param.windowa_x_end = pipe->stream->timing.h_addressable;
        param.windowa_y_end = pipe->stream->timing.v_addressable;
        param.windowb_x_start = 0;
        param.windowb_y_start = 0;
        param.windowb_x_end = pipe->stream->timing.h_addressable;
        param.windowb_y_end = pipe->stream->timing.v_addressable;

        if (crc_window) {
                param.windowa_x_start = crc_window->windowa_x_start;
                param.windowa_y_start = crc_window->windowa_y_start;
                param.windowa_x_end = crc_window->windowa_x_end;
                param.windowa_y_end = crc_window->windowa_y_end;
                param.windowb_x_start = crc_window->windowb_x_start;
                param.windowb_y_start = crc_window->windowb_y_start;
                param.windowb_x_end = crc_window->windowb_x_end;
                param.windowb_y_end = crc_window->windowb_y_end;
        }

        param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
        param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

        /* Default to the union of both windows */
        param.selection = UNION_WINDOW_A_B;
        param.continuous_mode = continuous;
        param.enable = enable;

        tg = pipe->stream_res.tg;

        /* Only call if supported */
        if (tg->funcs->configure_crc)
                return tg->funcs->configure_crc(tg, &param);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels stored here.
 * @g_y:  CRC value for the second of the 3 channels stored here.
 * @b_cb: CRC value for the third of the 3 channels stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
                       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
        int i;
        struct pipe_ctx *pipe;
        struct timing_generator *tg;

        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream == stream)
                        break;
        }
        /* Stream not found */
        if (i == MAX_PIPES)
                return false;

        tg = pipe->stream_res.tg;

        if (tg->funcs->get_crc)
                return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
        DC_LOG_WARNING("CRC capture not supported.");
        return false;
}

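/*
 * Illustrative usage (hypothetical caller): the two CRC entry points above
 * pair up.  Passing a NULL crc_window selects the default full-frame window.
 *
 *   uint32_t r_cr, g_y, b_cb;
 *
 *   // Start continuous full-frame CRC capture on CRC0.
 *   if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *           return;
 *
 *   // Later, typically after a vblank, read the three channel CRCs.
 *   if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *           DC_LOG_DC("CRC: %08x %08x %08x\n", r_cr, g_y, b_cb);
 */
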
void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
{
        /* OPP FMT dyn expansion updates */
        int i;
        struct pipe_ctx *pipe_ctx;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
                        pipe_ctx->stream_res.opp->dyn_expansion = option;
                        pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
                                        pipe_ctx->stream_res.opp,
                                        COLOR_SPACE_YCBCR601,
                                        stream->timing.display_color_depth,
                                        stream->signal);
                }
        }
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
                enum dc_dither_option option)
{
        struct bit_depth_reduction_params params;
        struct dc_link *link = stream->link;
        struct pipe_ctx *pipes = NULL;
        int i;

        for (i = 0; i < MAX_PIPES; i++) {
                if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
                                stream) {
                        pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        break;
                }
        }

        if (!pipes)
                return;
        if (option > DITHER_OPTION_MAX)
                return;

        stream->dither_option = option;

        memset(&params, 0, sizeof(params));
        resource_build_bit_depth_reduction_params(stream, &params);
        stream->bit_depth_params = params;

        if (pipes->plane_res.xfm &&
            pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
                pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
                        pipes->plane_res.xfm,
                        pipes->plane_res.scl_data.lb_params.depth,
                        &stream->bit_depth_params);
        }

        pipes->stream_res.opp->funcs->
                opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_gamut_remap(pipes);
                        ret = true;
                }
        }

        return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
        int i;
        bool ret = false;
        struct pipe_ctx *pipes;

        for (i = 0; i < MAX_PIPES; i++) {
                if (dc->current_state->res_ctx.pipe_ctx[i].stream
                                == stream) {
                        pipes = &dc->current_state->res_ctx.pipe_ctx[i];
                        dc->hwss.program_output_csc(dc,
                                        pipes,
                                        stream->output_color_space,
                                        stream->csc_color_matrix.matrix,
                                        pipes->stream_res.opp->inst);
                        ret = true;
                }
        }

        return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
                struct dc_stream_state **streams,
                int num_streams,
                const struct dc_static_screen_params *params)
{
        int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;

        for (i = 0; i < num_streams; i++) {
                struct dc_stream_state *stream = streams[i];

                for (j = 0; j < MAX_PIPES; j++) {
                        if (dc->current_state->res_ctx.pipe_ctx[j].stream
                                        == stream) {
                                pipes_affected[num_pipes_affected++] =
                                                &dc->current_state->res_ctx.pipe_ctx[j];
                        }
                }
        }

        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
        // reset link encoder assignment table on destruct
        if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
                link_enc_cfg_init(dc, dc->current_state);

        if (dc->current_state) {
                dc_release_state(dc->current_state);
                dc->current_state = NULL;
        }

        destroy_links(dc);

        destroy_link_encoders(dc);

        if (dc->clk_mgr) {
                dc_destroy_clk_mgr(dc->clk_mgr);
                dc->clk_mgr = NULL;
        }

        dc_destroy_resource_pool(dc);

        if (dc->ctx->gpio_service)
                dal_gpio_service_destroy(&dc->ctx->gpio_service);

        if (dc->ctx->created_bios)
                dal_bios_parser_destroy(&dc->ctx->dc_bios);

        dc_perf_trace_destroy(&dc->ctx->perf_trace);

        kfree(dc->ctx);
        dc->ctx = NULL;

        kfree(dc->bw_vbios);
        dc->bw_vbios = NULL;

        kfree(dc->bw_dceip);
        dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
        kfree(dc->dcn_soc);
        dc->dcn_soc = NULL;

        kfree(dc->dcn_ip);
        dc->dcn_ip = NULL;

#endif
        kfree(dc->vm_helper);
        dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;

        dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
        if (!dc_ctx)
                return false;

        dc_ctx->cgs_device = init_params->cgs_device;
        dc_ctx->driver_context = init_params->driver;
        dc_ctx->dc = dc;
        dc_ctx->asic_id = init_params->asic_id;
        dc_ctx->dc_sink_id_count = 0;
        dc_ctx->dc_stream_id_count = 0;
        dc_ctx->dce_environment = init_params->dce_environment;

        /* Create logger */

        dc_version = resource_parse_asic_id(init_params->asic_id);
        dc_ctx->dce_version = dc_version;

        dc_ctx->perf_trace = dc_perf_trace_create();
        if (!dc_ctx->perf_trace) {
                ASSERT_CRITICAL(false);
                return false;
        }

        dc->ctx = dc_ctx;

        return true;
}

static bool dc_construct(struct dc *dc,
                const struct dc_init_data *init_params)
{
        struct dc_context *dc_ctx;
        struct bw_calcs_dceip *dc_dceip;
        struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;
#endif

        dc->config = init_params->flags;

        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
        if (!dc->vm_helper) {
                dm_error("%s: failed to create dc->vm_helper\n", __func__);
                goto fail;
        }

        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
                goto fail;
        }

        dc->bw_dceip = dc_dceip;

        dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
        if (!dc_vbios) {
                dm_error("%s: failed to create vbios\n", __func__);
                goto fail;
        }

        dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
        dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
        if (!dcn_soc) {
                dm_error("%s: failed to create dcn_soc\n", __func__);
                goto fail;
        }

        dc->dcn_soc = dcn_soc;

        dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
        if (!dcn_ip) {
                dm_error("%s: failed to create dcn_ip\n", __func__);
                goto fail;
        }

        dc->dcn_ip = dcn_ip;
#endif

        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
        }

        dc_ctx = dc->ctx;

        /* Resource should construct all asic specific resources.
         * This should be the only place where we need to parse the asic id
         */
        if (init_params->vbios_override)
                dc_ctx->dc_bios = init_params->vbios_override;
        else {
                /* Create BIOS parser */
                struct bp_init_data bp_init_data;

                bp_init_data.ctx = dc_ctx;
                bp_init_data.bios = init_params->asic_id.atombios_base_address;

                dc_ctx->dc_bios = dal_bios_parser_create(
                                &bp_init_data, dc_ctx->dce_version);

                if (!dc_ctx->dc_bios) {
                        ASSERT_CRITICAL(false);
                        goto fail;
                }

                dc_ctx->created_bios = true;
        }

        dc->vendor_signature = init_params->vendor_signature;

        /* Create GPIO service */
        dc_ctx->gpio_service = dal_gpio_service_create(
                        dc_ctx->dce_version,
                        dc_ctx->dce_environment,
                        dc_ctx);

        if (!dc_ctx->gpio_service) {
                ASSERT_CRITICAL(false);
                goto fail;
        }

        dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
        if (!dc->res_pool)
                goto fail;

        /* set i2c speed if not done by the respective dcnxxx_resource.c */
        if (dc->caps.i2c_speed_in_khz_hdcp == 0)
                dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

        dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
        if (!dc->clk_mgr)
                goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

        if (dc->res_pool->funcs->update_bw_bounding_box) {
                DC_FP_START();
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
                DC_FP_END();
        }
#endif

        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
         */

        dc->current_state = dc_create_state(dc);

        if (!dc->current_state) {
                dm_error("%s: failed to create validate ctx\n", __func__);
                goto fail;
        }

        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;

        /* Create additional DIG link encoder objects if fewer than the platform
         * supports were created during link construction.
         */
        if (!create_link_encoders(dc))
                goto fail;

        dc_resource_state_construct(dc, dc->current_state);

        return true;

fail:
        return false;
}

static void disable_all_writeback_pipes_for_stream(
                const struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_state *context)
{
        int i;

        for (i = 0; i < stream->num_wb_info; i++)
                stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
                                          struct dc_stream_state *stream, bool lock)
{
        int i;

        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock)
                dc->hwss.interdependent_update_lock(dc, context, lock);
        else {
                for (i = 0; i < dc->res_pool->pipe_count; i++) {
                        struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
                        struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

                        // Copied conditions that were previously in dce110_apply_ctx_for_surface
                        if (stream == pipe_ctx->stream) {
                                if (!pipe_ctx->top_pipe &&
                                        (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
                                        dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
                        }
                }
        }
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
        int i, j;
        struct dc_state *dangling_context = dc_create_state(dc);
        struct dc_state *current_ctx;

        if (dangling_context == NULL)
                return;

        dc_resource_state_copy_construct(dc->current_state, dangling_context);

        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *old_stream =
                                dc->current_state->res_ctx.pipe_ctx[i].stream;
                bool should_disable = true;
                bool pipe_split_change =
                        context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

                for (j = 0; j < context->stream_count; j++) {
                        if (old_stream == context->streams[j]) {
                                should_disable = false;
                                break;
                        }
                }
                if (!should_disable && pipe_split_change)
                        should_disable = true;

                if (should_disable && old_stream) {
                        dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
                        disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

                        if (dc->hwss.apply_ctx_for_surface) {
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
                                dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
                                apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                        if (dc->hwss.program_front_end_for_ctx) {
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
                                dc->hwss.program_front_end_for_ctx(dc, dangling_context);
                                dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
                                dc->hwss.post_unlock_program_front_end(dc, dangling_context);
                        }
                }
        }

        current_ctx = dc->current_state;
        dc->current_state = dangling_context;
        dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
                struct dc *dc,
                struct dc_state *context)
{
        unsigned int i, j;

        /* if the timing changed, disable the stream */
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                struct dc_stream_state *stream = NULL;
                struct dc_link *link = NULL;
                struct pipe_ctx *pipe = NULL;

                pipe = &context->res_ctx.pipe_ctx[i];
                stream = pipe->stream;
                if (stream == NULL)
                        continue;

                // only looking for first odm pipe
                if (pipe->prev_odm_pipe)
                        continue;

                if (stream->link->local_sink &&
                        stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
                        link = stream->link;
                }

                if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
                        unsigned int enc_inst, tg_inst = 0;
                        unsigned int pix_clk_100hz;

                        enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
                        if (enc_inst != ENGINE_ID_UNKNOWN) {
                                for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
                                        if (dc->res_pool->stream_enc[j]->id == enc_inst) {
                                                tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
                                                        dc->res_pool->stream_enc[j]);
                                                break;
                                        }
                                }

                                dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
                                        dc->res_pool->dp_clock_source,
                                        tg_inst, &pix_clk_100hz);

                                if (link->link_status.link_active) {
                                        uint32_t requested_pix_clk_100hz =
                                                pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

                                        if (pix_clk_100hz != requested_pix_clk_100hz) {
                                                core_link_disable_stream(pipe);
                                                pipe->stream->dpms_off = false;
                                        }
                                }
                        }
                }
        }
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
        int i;
        PERF_TRACE();
        for (i = 0; i < MAX_PIPES; i++) {
                int count = 0;
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

                if (!pipe->plane_state)
                        continue;

                /* Timeout 100 ms */
                while (count < 100000) {
                        /* Must set to false to start with, due to OR in update function */
                        pipe->plane_state->status.is_flip_pending = false;
                        dc->hwss.update_pending_status(pipe);
                        if (!pipe->plane_state->status.is_flip_pending)
                                break;
                        udelay(1);
                        count++;
                }
                ASSERT(!pipe->plane_state->status.is_flip_pending);
        }
        PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
        struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
        unsigned int full_pipe_count;

        if (!dc)
                return NULL;

        if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
                if (!dc_construct_ctx(dc, init_params))
                        goto destruct_dc;
        } else {
                if (!dc_construct(dc, init_params))
                        goto destruct_dc;

                full_pipe_count = dc->res_pool->pipe_count;
                if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
                        full_pipe_count--;
                dc->caps.max_streams = min(
                                full_pipe_count,
                                dc->res_pool->stream_enc_count);

                dc->caps.max_links = dc->link_count;
                dc->caps.max_audios = dc->res_pool->audio_count;
                dc->caps.linear_pitch_alignment = 64;

                dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

                dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

                if (dc->res_pool->dmcu != NULL)
                        dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
        }

        /* Populate versioning information */
        dc->versions.dc_ver = DC_VER;

        dc->build_id = DC_BUILD_ID;

        DC_LOG_DC("Display Core initialized\n");

        return dc;

destruct_dc:
        dc_destruct(dc);
        kfree(dc);
        return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
        struct dc_link *edp_links[MAX_NUM_EDP];
        struct dc_link *edp_link = NULL;
        enum dc_connection_type type;
        int i;
        int edp_num;

        get_edp_links(dc, edp_links, &edp_num);
        if (!edp_num)
                return;

        for (i = 0; i < edp_num; i++) {
                edp_link = edp_links[i];
                if (dc->config.edp_not_connected) {
                        edp_link->edp_sink_present = false;
                } else {
                        dc_link_detect_sink(edp_link, &type);
                        edp_link->edp_sink_present = (type != dc_connection_none);
                }
        }
}

void dc_hardware_init(struct dc *dc)
{
        detect_edp_presence(dc);
        if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
                dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
                const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
        dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
        dc_destruct(*dc);
        kfree(*dc);
        *dc = NULL;
}

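/*
 * Illustrative lifecycle (hypothetical DM-layer flow): dc_create(),
 * dc_hardware_init() and dc_destroy() bracket the life of the core object,
 * matching the "created on driver load, destroyed on driver unload" rule
 * from the overview.  Error handling is elided.
 *
 *   struct dc_init_data init = { 0 };
 *   struct dc *dc;
 *
 *   // ... fill init.asic_id, init.driver, init.dce_environment, etc. ...
 *   dc = dc_create(&init);
 *   if (dc)
 *           dc_hardware_init(dc);
 *
 *   // ... runtime ...
 *
 *   dc_destroy(&dc);
 */
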
static void enable_timing_multisync(
                struct dc *dc,
                struct dc_state *ctx)
{
        int i, multisync_count = 0;
        int pipe_count = dc->res_pool->pipe_count;
        struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

        for (i = 0; i < pipe_count; i++) {
                if (!ctx->res_ctx.pipe_ctx[i].stream ||
                                !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
                        continue;
                if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
                        continue;
                multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
                multisync_count++;
        }

        if (multisync_count > 0) {
                dc->hwss.enable_per_frame_crtc_position_reset(
                        dc, multisync_count, multisync_pipes);
        }
}

static void program_timing_sync(
                struct dc *dc,
                struct dc_state *ctx)
{
        int i, j, k;
        int group_index = 0;
        int num_group = 0;
        int pipe_count = dc->res_pool->pipe_count;
        struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

        for (i = 0; i < pipe_count; i++) {
                if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
                        continue;

                unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
        }

        for (i = 0; i < pipe_count; i++) {
                int group_size = 1;
                enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
                struct pipe_ctx *pipe_set[MAX_PIPES];

                if (!unsynced_pipes[i])
                        continue;

                pipe_set[0] = unsynced_pipes[i];
                unsynced_pipes[i] = NULL;

                /* Add tg to the set, search rest of the tg's for ones with
                 * same timing, add all tgs with same timing to the group
                 */
                for (j = i + 1; j < pipe_count; j++) {
                        if (!unsynced_pipes[j])
                                continue;
                        if (sync_type != TIMING_SYNCHRONIZABLE &&
                                dc->hwss.enable_vblanks_synchronization &&
                                unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
                                resource_are_vblanks_synchronizable(
                                        unsynced_pipes[j]->stream,
                                        pipe_set[0]->stream)) {
                                sync_type = VBLANK_SYNCHRONIZABLE;
                                pipe_set[group_size] = unsynced_pipes[j];
                                unsynced_pipes[j] = NULL;
                                group_size++;
                        } else if (sync_type != VBLANK_SYNCHRONIZABLE &&
                                resource_are_streams_timing_synchronizable(
                                        unsynced_pipes[j]->stream,
                                        pipe_set[0]->stream)) {
                                sync_type = TIMING_SYNCHRONIZABLE;
                                pipe_set[group_size] = unsynced_pipes[j];
                                unsynced_pipes[j] = NULL;
                                group_size++;
                        }
                }

                /* set first unblanked pipe as master */
                for (j = 0; j < group_size; j++) {
                        bool is_blanked;

                        if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
                                is_blanked =
                                        pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
                        else
                                is_blanked =
                                        pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
                        if (!is_blanked) {
                                if (j == 0)
                                        break;

                                swap(pipe_set[0], pipe_set[j]);
                                break;
                        }
                }

                for (k = 0; k < group_size; k++) {
                        struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

                        status->timing_sync_info.group_id = num_group;
                        status->timing_sync_info.group_size = group_size;
                        if (k == 0)
                                status->timing_sync_info.master = true;
                        else
                                status->timing_sync_info.master = false;
                }

                /* remove any other pipes that have already been synced */
1414                 if (dc->config.use_pipe_ctx_sync_logic) {
1415                         /* check pipe's syncd to decide which pipe to be removed */
1416                         for (j = 1; j < group_size; j++) {
1417                                 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1418                                         group_size--;
1419                                         pipe_set[j] = pipe_set[group_size];
1420                                         j--;
1421                                 } else
1422                                         /* link slave pipe's syncd with master pipe */
1423                                         pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1424                         }
1425                 } else {
1426                         for (j = j + 1; j < group_size; j++) {
1427                                 bool is_blanked;
1428
1429                                 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1430                                         is_blanked =
1431                                                 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1432                                 else
1433                                         is_blanked =
1434                                                 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1435                                 if (!is_blanked) {
1436                                         group_size--;
1437                                         pipe_set[j] = pipe_set[group_size];
1438                                         j--;
1439                                 }
1440                         }
1441                 }
1442
1443                 if (group_size > 1) {
1444                         if (sync_type == TIMING_SYNCHRONIZABLE) {
1445                                 dc->hwss.enable_timing_synchronization(
1446                                         dc, group_index, group_size, pipe_set);
1447                         } else if (sync_type == VBLANK_SYNCHRONIZABLE) {
1448                                 dc->hwss.enable_vblanks_synchronization(
1449                                         dc, group_index,
1450                                         group_size, pipe_set);
1451                         }
1452                         group_index++;
1453                 }
1454                 num_group++;
1455         }
1456 }
1457
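/*
 * context_changed() - Check whether the set of streams in @context differs
 * from the one in dc->current_state, so redundant commits can be skipped.
 */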
1458 static bool context_changed(
1459                 struct dc *dc,
1460                 struct dc_state *context)
1461 {
1462         uint8_t i;
1463
1464         if (context->stream_count != dc->current_state->stream_count)
1465                 return true;
1466
1467         for (i = 0; i < dc->current_state->stream_count; i++) {
1468                 if (dc->current_state->streams[i] != context->streams[i])
1469                         return true;
1470         }
1471
1472         return false;
1473 }
1474
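/**
 * dc_validate_seamless_boot_timing() - Check whether the timing the
 * VBIOS/GOP left programmed on an eDP link matches @crtc_timing closely
 * enough for the driver to take over without a full modeset.
 *
 * Returns true only if an enabled DIG is found, the OTG timing, pixel
 * clock and pixel format all match, and no feature that VBIOS cannot
 * program (DSC, VSC SDP colorimetry, eDP ILR optimization) is required.
 */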
1475 bool dc_validate_seamless_boot_timing(const struct dc *dc,
1476                                 const struct dc_sink *sink,
1477                                 struct dc_crtc_timing *crtc_timing)
1478 {
1479         struct timing_generator *tg;
1480         struct stream_encoder *se = NULL;
1481
1482         struct dc_crtc_timing hw_crtc_timing = {0};
1483
1484         struct dc_link *link = sink->link;
1485         unsigned int i, enc_inst, tg_inst = 0;
1486
1487         /* Support seamless boot on eDP displays only */
1488         if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1489                 return false;
1490         }
1491
1492         /* Check for enabled DIG to identify enabled display */
1493         if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1494                 return false;
1495
1496         enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1497
1498         if (enc_inst == ENGINE_ID_UNKNOWN)
1499                 return false;
1500
1501         for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1502                 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1503
1504                         se = dc->res_pool->stream_enc[i];
1505
1506                         tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1507                                 dc->res_pool->stream_enc[i]);
1508                         break;
1509                 }
1510         }
1511
1512         /* no matching stream encoder found, so tg_inst is not valid */
1513         if (i == dc->res_pool->stream_enc_count)
1514                 return false;
1515
1516         if (tg_inst >= dc->res_pool->timing_generator_count)
1517                 return false;
1518
1519         tg = dc->res_pool->timing_generators[tg_inst];
1520
1521         if (!tg->funcs->get_hw_timing)
1522                 return false;
1523
1524         if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1525                 return false;
1526
1527         if (crtc_timing->h_total != hw_crtc_timing.h_total)
1528                 return false;
1529
1530         if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1531                 return false;
1532
1533         if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1534                 return false;
1535
1536         if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1537                 return false;
1538
1539         if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1540                 return false;
1541
1542         if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1543                 return false;
1544
1545         if (crtc_timing->v_total != hw_crtc_timing.v_total)
1546                 return false;
1547
1548         if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1549                 return false;
1550
1551         if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1552                 return false;
1553
1554         if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1555                 return false;
1556
1557         if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1558                 return false;
1559
1560         if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1561                 return false;
1562
1563         /* block DSC for now, as VBIOS does not currently support DSC timings */
1564         if (crtc_timing->flags.DSC)
1565                 return false;
1566
1567         if (dc_is_dp_signal(link->connector_signal)) {
1568                 unsigned int pix_clk_100hz;
1569
1570                 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1571                         dc->res_pool->dp_clock_source,
1572                         tg_inst, &pix_clk_100hz);
1573
1574                 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1575                         return false;
1576
1577                 if (!se->funcs->dp_get_pixel_format)
1578                         return false;
1579
1580                 if (!se->funcs->dp_get_pixel_format(
1581                         se,
1582                         &hw_crtc_timing.pixel_encoding,
1583                         &hw_crtc_timing.display_color_depth))
1584                         return false;
1585
1586                 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1587                         return false;
1588
1589                 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1590                         return false;
1591         }
1592
1593         if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1594                 return false;
1595         }
1596
1597         if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1598                 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1599                 return false;
1600         }
1601
1602         return true;
1603 }
1604
1605 static inline bool should_update_pipe_for_stream(
1606                 struct dc_state *context,
1607                 struct pipe_ctx *pipe_ctx,
1608                 struct dc_stream_state *stream)
1609 {
1610         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1611 }
1612
1613 static inline bool should_update_pipe_for_plane(
1614                 struct dc_state *context,
1615                 struct pipe_ctx *pipe_ctx,
1616                 struct dc_plane_state *plane_state)
1617 {
1618         return (pipe_ctx->plane_state == plane_state);
1619 }
1620
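/**
 * dc_enable_stereo() - Run the stereo setup hook on every pipe driving one
 * of @streams. Falls back to dc->current_state when @context is NULL.
 */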
1621 void dc_enable_stereo(
1622         struct dc *dc,
1623         struct dc_state *context,
1624         struct dc_stream_state *streams[],
1625         uint8_t stream_count)
1626 {
1627         int i, j;
1628         struct pipe_ctx *pipe;
1629
1630         for (i = 0; i < MAX_PIPES; i++) {
1631                 if (context != NULL) {
1632                         pipe = &context->res_ctx.pipe_ctx[i];
1633                 } else {
1634                         context = dc->current_state;
1635                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1636                 }
1637
1638                 for (j = 0; pipe && j < stream_count; j++) {
1639                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1640                                 dc->hwss.setup_stereo)
1641                                 dc->hwss.setup_stereo(pipe, dc);
1642                 }
1643         }
1644 }
1645
1646 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1647 {
1648         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1649                 enable_timing_multisync(dc, context);
1650                 program_timing_sync(dc, context);
1651         }
1652 }
1653
1654 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1655 {
1656         int i;
1657         unsigned int stream_mask = 0;
1658
1659         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1660                 if (context->res_ctx.pipe_ctx[i].stream)
1661                         stream_mask |= 1 << i;
1662         }
1663
1664         return stream_mask;
1665 }
1666
1667 #if defined(CONFIG_DRM_AMD_DC_DCN)
1668 void dc_z10_restore(const struct dc *dc)
1669 {
1670         if (dc->hwss.z10_restore)
1671                 dc->hwss.z10_restore(dc);
1672 }
1673
1674 void dc_z10_save_init(struct dc *dc)
1675 {
1676         if (dc->hwss.z10_save_init)
1677                 dc->hwss.z10_save_init(dc);
1678 }
1679 #endif
1680 /*
1681  * Applies the given context to HW and copies it into the current context.
1682  * It's up to the caller to release the src context afterwards.
1683  */
1684 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1685 {
1686         struct dc_bios *dcb = dc->ctx->dc_bios;
1687         enum dc_status result = DC_ERROR_UNEXPECTED;
1688         struct pipe_ctx *pipe;
1689         int i, k, l;
1690         struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1691
1692 #if defined(CONFIG_DRM_AMD_DC_DCN)
1693         dc_z10_restore(dc);
1694         dc_allow_idle_optimizations(dc, false);
1695 #endif
1696
1697         for (i = 0; i < context->stream_count; i++)
1698                 dc_streams[i] =  context->streams[i];
1699
1700         if (!dcb->funcs->is_accelerated_mode(dcb)) {
1701                 disable_vbios_mode_if_required(dc, context);
1702                 dc->hwss.enable_accelerated_mode(dc, context);
1703         }
1704
1705         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1706                 context->stream_count == 0)
1707                 dc->hwss.prepare_bandwidth(dc, context);
1708
1709         disable_dangling_plane(dc, context);
1710         /* re-program planes for the existing streams, in case we need to
1711          * free up plane resources for later use
1712          */
1713         if (dc->hwss.apply_ctx_for_surface) {
1714                 for (i = 0; i < context->stream_count; i++) {
1715                         if (context->streams[i]->mode_changed)
1716                                 continue;
1717                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1718                         dc->hwss.apply_ctx_for_surface(
1719                                 dc, context->streams[i],
1720                                 context->stream_status[i].plane_count,
1721                                 context); /* use new pipe config in new context */
1722                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1723                         dc->hwss.post_unlock_program_front_end(dc, context);
1724                 }
1725         }
1726
1727         /* Program hardware */
1728         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1729                 pipe = &context->res_ctx.pipe_ctx[i];
1730                 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1731         }
1732
1733         result = dc->hwss.apply_ctx_to_hw(dc, context);
1734
1735         if (result != DC_OK)
1736                 return result;
1737
1738         dc_trigger_sync(dc, context);
1739
1740         /* Program all planes within the new context */
1741         if (dc->hwss.program_front_end_for_ctx) {
1742                 dc->hwss.interdependent_update_lock(dc, context, true);
1743                 dc->hwss.program_front_end_for_ctx(dc, context);
1744                 dc->hwss.interdependent_update_lock(dc, context, false);
1745                 dc->hwss.post_unlock_program_front_end(dc, context);
1746         }
1747         for (i = 0; i < context->stream_count; i++) {
1748                 const struct dc_link *link = context->streams[i]->link;
1749
1750                 if (!context->streams[i]->mode_changed)
1751                         continue;
1752
1753                 if (dc->hwss.apply_ctx_for_surface) {
1754                         apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1755                         dc->hwss.apply_ctx_for_surface(
1756                                         dc, context->streams[i],
1757                                         context->stream_status[i].plane_count,
1758                                         context);
1759                         apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1760                         dc->hwss.post_unlock_program_front_end(dc, context);
1761                 }
1762
1763                 /*
1764                  * enable stereo
1765                  * TODO rework dc_enable_stereo call to work with validation sets?
1766                  */
1767                 for (k = 0; k < MAX_PIPES; k++) {
1768                         pipe = &context->res_ctx.pipe_ctx[k];
1769
1770                         for (l = 0 ; pipe && l < context->stream_count; l++)  {
1771                                 if (context->streams[l] &&
1772                                         context->streams[l] == pipe->stream &&
1773                                         dc->hwss.setup_stereo)
1774                                         dc->hwss.setup_stereo(pipe, dc);
1775                         }
1776                 }
1777
1778                 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1779                                 context->streams[i]->timing.h_addressable,
1780                                 context->streams[i]->timing.v_addressable,
1781                                 context->streams[i]->timing.h_total,
1782                                 context->streams[i]->timing.v_total,
1783                                 context->streams[i]->timing.pix_clk_100hz / 10);
1784         }
1785
1786         dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1787
1788         if (context->stream_count > get_seamless_boot_stream_count(context) ||
1789                 context->stream_count == 0) {
1790                 /* Must wait for no flips to be pending before optimizing bandwidth */
1791                 wait_for_no_pipes_pending(dc, context);
1792                 /* pplib is notified if disp_num changed */
1793                 dc->hwss.optimize_bandwidth(dc, context);
1794         }
1795
1796         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1797                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1798         else
1799                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1800
1801         context->stream_mask = get_stream_mask(dc, context);
1802
1803         if (context->stream_mask != dc->current_state->stream_mask)
1804                 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1805
1806         for (i = 0; i < context->stream_count; i++)
1807                 context->streams[i]->mode_changed = false;
1808
1809         dc_release_state(dc->current_state);
1810
1811         dc->current_state = context;
1812
1813         dc_retain_state(dc->current_state);
1814
1815         return result;
1816 }
1817
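/**
 * dc_commit_state() - Re-validate @context with full DML calculations and,
 * if the stream set changed, program it to hardware.
 *
 * Returns true on success or when nothing needed to be committed, false on
 * a validation or programming failure.
 */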
1818 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1819 {
1820         enum dc_status result = DC_ERROR_UNEXPECTED;
1821         int i;
1822
1823         if (!context_changed(dc, context))
1824                 return true;
1825
1826         DC_LOG_DC("%s: %d streams\n",
1827                                 __func__, context->stream_count);
1828
1829         for (i = 0; i < context->stream_count; i++) {
1830                 struct dc_stream_state *stream = context->streams[i];
1831
1832                 dc_stream_log(dc, stream);
1833         }
1834
1835         /*
1836          * Previous validation was performed with fast_validation = true and
1837          * the full DML state required for hardware programming was skipped.
1838          *
1839          * Re-validate here to calculate these parameters / watermarks.
1840          */
1841         result = dc_validate_global_state(dc, context, false);
1842         if (result != DC_OK) {
1843                 DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
1844                              dc_status_to_str(result), result);
1845                 return false;
1846         }
1847
1848         result = dc_commit_state_no_check(dc, context);
1849
1850         return (result == DC_OK);
1851 }
1852
1853 #if defined(CONFIG_DRM_AMD_DC_DCN)
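/*
 * dc_acquire_release_mpc_3dlut() - Acquire (@acquire == true) or release a
 * post-blend 3D LUT and shaper for @stream, using the MPCC instance of the
 * pipe currently driving the stream when acquiring.
 */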
1854 bool dc_acquire_release_mpc_3dlut(
1855                 struct dc *dc, bool acquire,
1856                 struct dc_stream_state *stream,
1857                 struct dc_3dlut **lut,
1858                 struct dc_transfer_func **shaper)
1859 {
1860         int pipe_idx;
1861         bool ret = false;
1862         bool found_pipe_idx = false;
1863         const struct resource_pool *pool = dc->res_pool;
1864         struct resource_context *res_ctx = &dc->current_state->res_ctx;
1865         int mpcc_id = 0;
1866
1867         if (pool && res_ctx) {
1868                 if (acquire) {
1869                         /* find pipe idx for the given stream */
1870                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1871                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1872                                         found_pipe_idx = true;
1873                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1874                                         break;
1875                                 }
1876                         }
1877                 } else
1878                         found_pipe_idx = true; /* for release, pipe_idx is not required */
1879
1880                 if (found_pipe_idx) {
1881                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1882                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1883                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1884                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1885                 }
1886         }
1887         return ret;
1888 }
1889 #endif
1890 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1891 {
1892         int i;
1893         struct pipe_ctx *pipe;
1894
1895         for (i = 0; i < MAX_PIPES; i++) {
1896                 pipe = &context->res_ctx.pipe_ctx[i];
1897
1898                 if (!pipe->plane_state)
1899                         continue;
1900
1901                 /* Must set to false to start with, due to OR in update function */
1902                 pipe->plane_state->status.is_flip_pending = false;
1903                 dc->hwss.update_pending_status(pipe);
1904                 if (pipe->plane_state->status.is_flip_pending)
1905                         return true;
1906         }
1907         return false;
1908 }
1909
1910 #ifdef CONFIG_DRM_AMD_DC_DCN
1911 /* Perform updates here which need to be deferred until next vupdate
1912  *
1913  * e.g. blend LUT, 3D LUT, and shaper LUT bypass regs are double buffered
1914  * but forcing lut memory to shutdown state is immediate. This causes
1915  * single frame corruption as lut gets disabled mid-frame unless shutdown
1916  * is deferred until after entering bypass.
1917  */
1918 static void process_deferred_updates(struct dc *dc)
1919 {
1920         int i = 0;
1921
1922         if (dc->debug.enable_mem_low_power.bits.cm) {
1923                 ASSERT(dc->dcn_ip->max_num_dpp);
1924                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
1925                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
1926                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
1927         }
1928 }
1929 #endif /* CONFIG_DRM_AMD_DC_DCN */
1930
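/**
 * dc_post_update_surfaces_to_stream() - Apply the optimizations deferred by
 * a previous surface update: disable pipes left without a stream or plane
 * and re-optimize bandwidth. Returns early while flips are still pending or
 * while seamless boot streams exist.
 */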
1931 void dc_post_update_surfaces_to_stream(struct dc *dc)
1932 {
1933         int i;
1934         struct dc_state *context = dc->current_state;
1935
1936         if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1937                 return;
1938
1939         post_surface_trace(dc);
1940
1941         if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1942                 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1943         else
1944                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1945
1946         if (is_flip_pending_in_pipes(dc, context))
1947                 return;
1948
1949         for (i = 0; i < dc->res_pool->pipe_count; i++)
1950                 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1951                     context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1952                         context->res_ctx.pipe_ctx[i].pipe_idx = i;
1953                         dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
1954                 }
1955
1956 #ifdef CONFIG_DRM_AMD_DC_DCN
1957         process_deferred_updates(dc);
1958 #endif
1959
1960         dc->hwss.optimize_bandwidth(dc, context);
1961
1962         dc->optimized_required = false;
1963         dc->wm_optimized_required = false;
1964 }
1965
1966 static void init_state(struct dc *dc, struct dc_state *context)
1967 {
1968         /* Each context must have its own instance of VBA, and in order to
1969          * initialize and obtain IP and SOC, the base DML instance from DC is
1970          * initially copied into every context.
1971          */
1972 #ifdef CONFIG_DRM_AMD_DC_DCN
1973         memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
1974 #endif
1975 }
1976
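/*
 * dc_state lifetime is managed through @refcount. A minimal usage sketch
 * (error handling elided):
 *
 *	struct dc_state *state = dc_create_state(dc);	// refcount == 1
 *
 *	dc_retain_state(state);			// refcount == 2
 *	// ... share the state with a second owner ...
 *	dc_release_state(state);		// refcount == 1
 *	dc_release_state(state);		// freed via dc_state_free()
 */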
1977 struct dc_state *dc_create_state(struct dc *dc)
1978 {
1979         struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1980                                             GFP_KERNEL);
1981
1982         if (!context)
1983                 return NULL;
1984
1985         init_state(dc, context);
1986
1987         kref_init(&context->refcount);
1988
1989         return context;
1990 }
1991
1992 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1993 {
1994         int i, j;
1995         struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1996
1997         if (!new_ctx)
1998                 return NULL;
1999         memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2000
2001         for (i = 0; i < MAX_PIPES; i++) {
2002                 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2003 
2004                 if (cur_pipe->top_pipe)
2005                         cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2006 
2007                 if (cur_pipe->bottom_pipe)
2008                         cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2009 
2010                 if (cur_pipe->prev_odm_pipe)
2011                         cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2012 
2013                 if (cur_pipe->next_odm_pipe)
2014                         cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2015 
2016         }
2017
2018         for (i = 0; i < new_ctx->stream_count; i++) {
2019                 dc_stream_retain(new_ctx->streams[i]);
2020                 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2021                         dc_plane_state_retain(
2022                                 new_ctx->stream_status[i].plane_states[j]);
2023         }
2024
2025         kref_init(&new_ctx->refcount);
2026
2027         return new_ctx;
2028 }
2029
2030 void dc_retain_state(struct dc_state *context)
2031 {
2032         kref_get(&context->refcount);
2033 }
2034
2035 static void dc_state_free(struct kref *kref)
2036 {
2037         struct dc_state *context = container_of(kref, struct dc_state, refcount);
2038         dc_resource_state_destruct(context);
2039         kvfree(context);
2040 }
2041
2042 void dc_release_state(struct dc_state *context)
2043 {
2044         kref_put(&context->refcount, dc_state_free);
2045 }
2046
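/**
 * dc_set_generic_gpio_for_stereo() - Route (or stop routing, per @enable)
 * the stereo sync signal through the generic GPIO mux. Returns true when
 * the mux was configured and closed successfully.
 */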
2047 bool dc_set_generic_gpio_for_stereo(bool enable,
2048                 struct gpio_service *gpio_service)
2049 {
2050         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2051         struct gpio_pin_info pin_info;
2052         struct gpio *generic;
2053         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2054                            GFP_KERNEL);
2055
2056         if (!config)
2057                 return false;
2058         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2059
2060         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2061                 kfree(config);
2062                 return false;
2063         } else {
2064                 generic = dal_gpio_service_create_generic_mux(
2065                         gpio_service,
2066                         pin_info.offset,
2067                         pin_info.mask);
2068         }
2069
2070         if (!generic) {
2071                 kfree(config);
2072                 return false;
2073         }
2074
2075         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2076
2077         config->enable_output_from_mux = enable;
2078         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2079
2080         if (gpio_result == GPIO_RESULT_OK)
2081                 gpio_result = dal_mux_setup_config(generic, config);
2082
2083         if (gpio_result == GPIO_RESULT_OK) {
2084                 dal_gpio_close(generic);
2085                 dal_gpio_destroy_generic_mux(&generic);
2086                 kfree(config);
2087                 return true;
2088         } else {
2089                 dal_gpio_close(generic);
2090                 dal_gpio_destroy_generic_mux(&generic);
2091                 kfree(config);
2092                 return false;
2093         }
2094 }
2095
2096 static bool is_surface_in_context(
2097                 const struct dc_state *context,
2098                 const struct dc_plane_state *plane_state)
2099 {
2100         int j;
2101
2102         for (j = 0; j < MAX_PIPES; j++) {
2103                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2104
2105                 if (plane_state == pipe_ctx->plane_state) {
2106                         return true;
2107                 }
2108         }
2109
2110         return false;
2111 }
2112
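/*
 * get_plane_info_update_type() - Compare the incoming plane_info against the
 * current surface state and classify the change as FAST, MED or FULL,
 * setting the matching surface update_flags bits along the way.
 */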
2113 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2114 {
2115         union surface_update_flags *update_flags = &u->surface->update_flags;
2116         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2117
2118         if (!u->plane_info)
2119                 return UPDATE_TYPE_FAST;
2120
2121         if (u->plane_info->color_space != u->surface->color_space) {
2122                 update_flags->bits.color_space_change = 1;
2123                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2124         }
2125
2126         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2127                 update_flags->bits.horizontal_mirror_change = 1;
2128                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2129         }
2130
2131         if (u->plane_info->rotation != u->surface->rotation) {
2132                 update_flags->bits.rotation_change = 1;
2133                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2134         }
2135
2136         if (u->plane_info->format != u->surface->format) {
2137                 update_flags->bits.pixel_format_change = 1;
2138                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2139         }
2140
2141         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2142                 update_flags->bits.stereo_format_change = 1;
2143                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2144         }
2145
2146         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2147                 update_flags->bits.per_pixel_alpha_change = 1;
2148                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2149         }
2150
2151         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2152                 update_flags->bits.global_alpha_change = 1;
2153                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2154         }
2155
2156         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2157                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2158                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2159                 /* During DCC on/off, stutter period is calculated before
2160                  * DCC has fully transitioned. This results in incorrect
2161                  * stutter period calculation. Triggering a full update will
2162                  * recalculate stutter period.
2163                  */
2164                 update_flags->bits.dcc_change = 1;
2165                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2166         }
2167
2168         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2169                         resource_pixel_format_to_bpp(u->surface->format)) {
2170                 /* different bytes per element will require full bandwidth
2171                  * and DML calculation
2172                  */
2173                 update_flags->bits.bpp_change = 1;
2174                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2175         }
2176
2177         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2178                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2179                 update_flags->bits.plane_size_change = 1;
2180                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2181         }
2182
2183
2184         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2185                         sizeof(union dc_tiling_info)) != 0) {
2186                 update_flags->bits.swizzle_change = 1;
2187                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2188
2189                 /* todo: below are HW dependent, we should add a hook to
2190                  * DCE/N resource and validated there.
2191                  */
2192                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2193                         /* swizzled mode requires RQ to be setup properly,
2194                          * thus need to run DML to calculate RQ settings
2195                          */
2196                         update_flags->bits.bandwidth_change = 1;
2197                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2198                 }
2199         }
2200
2201         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2202         return update_type;
2203 }
2204
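/*
 * get_scaling_info_update_type() - Classify scaling changes: anything that
 * affects clocks or bandwidth is FULL, a pure position change is MED, and
 * no change at all is FAST.
 */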
2205 static enum surface_update_type get_scaling_info_update_type(
2206                 const struct dc_surface_update *u)
2207 {
2208         union surface_update_flags *update_flags = &u->surface->update_flags;
2209
2210         if (!u->scaling_info)
2211                 return UPDATE_TYPE_FAST;
2212
2213         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2214                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2215                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2216                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2217                         || u->scaling_info->scaling_quality.integer_scaling !=
2218                                 u->surface->scaling_quality.integer_scaling
2219                         ) {
2220                 update_flags->bits.scaling_change = 1;
2221
2222                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2223                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2224                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2225                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2226                         /* Making dst rect smaller requires a bandwidth change */
2227                         update_flags->bits.bandwidth_change = 1;
2228         }
2229
2230         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2231                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2232
2233                 update_flags->bits.scaling_change = 1;
2234                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2235                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2236                         /* Making src rect bigger requires a bandwidth change */
2237                         update_flags->bits.clock_change = 1;
2238         }
2239
2240         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2241                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2242                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2243                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2244                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2245                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2246                 update_flags->bits.position_change = 1;
2247
2248         if (update_flags->bits.clock_change
2249                         || update_flags->bits.bandwidth_change
2250                         || update_flags->bits.scaling_change)
2251                 return UPDATE_TYPE_FULL;
2252
2253         if (update_flags->bits.position_change)
2254                 return UPDATE_TYPE_MED;
2255
2256         return UPDATE_TYPE_FAST;
2257 }
2258
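/*
 * det_surface_update() - Determine the overall update type for one surface
 * update by combining the plane-info, scaling-info and color-management
 * checks. A surface not yet in the current context always forces FULL.
 */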
2259 static enum surface_update_type det_surface_update(const struct dc *dc,
2260                 const struct dc_surface_update *u)
2261 {
2262         const struct dc_state *context = dc->current_state;
2263         enum surface_update_type type;
2264         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2265         union surface_update_flags *update_flags = &u->surface->update_flags;
2266
2267         if (u->flip_addr)
2268                 update_flags->bits.addr_update = 1;
2269
2270         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2271                 update_flags->raw = 0xFFFFFFFF;
2272                 return UPDATE_TYPE_FULL;
2273         }
2274
2275         update_flags->raw = 0; // Reset all flags
2276
2277         type = get_plane_info_update_type(u);
2278         elevate_update_type(&overall_type, type);
2279
2280         type = get_scaling_info_update_type(u);
2281         elevate_update_type(&overall_type, type);
2282
2283         if (u->flip_addr)
2284                 update_flags->bits.addr_update = 1;
2285
2286         if (u->in_transfer_func)
2287                 update_flags->bits.in_transfer_func_change = 1;
2288
2289         if (u->input_csc_color_matrix)
2290                 update_flags->bits.input_csc_change = 1;
2291
2292         if (u->coeff_reduction_factor)
2293                 update_flags->bits.coeff_reduction_change = 1;
2294
2295         if (u->gamut_remap_matrix)
2296                 update_flags->bits.gamut_remap_change = 1;
2297
2298         if (u->gamma) {
2299                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2300
2301                 if (u->plane_info)
2302                         format = u->plane_info->format;
2303                 else if (u->surface)
2304                         format = u->surface->format;
2305
2306                 if (dce_use_lut(format))
2307                         update_flags->bits.gamma_change = 1;
2308         }
2309
2310         if (u->lut3d_func || u->func_shaper)
2311                 update_flags->bits.lut_3d = 1;
2312
2313         if (u->hdr_mult.value)
2314                 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2315                         update_flags->bits.hdr_mult = 1;
2316                         elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2317                 }
2318
2319         if (update_flags->bits.in_transfer_func_change) {
2320                 type = UPDATE_TYPE_MED;
2321                 elevate_update_type(&overall_type, type);
2322         }
2323
2324         if (update_flags->bits.input_csc_change
2325                         || update_flags->bits.coeff_reduction_change
2326                         || update_flags->bits.lut_3d
2327                         || update_flags->bits.gamma_change
2328                         || update_flags->bits.gamut_remap_change) {
2329                 type = UPDATE_TYPE_FULL;
2330                 elevate_update_type(&overall_type, type);
2331         }
2332
2333         return overall_type;
2334 }
2335
2336 static enum surface_update_type check_update_surfaces_for_stream(
2337                 struct dc *dc,
2338                 struct dc_surface_update *updates,
2339                 int surface_count,
2340                 struct dc_stream_update *stream_update,
2341                 const struct dc_stream_status *stream_status)
2342 {
2343         int i;
2344         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2345
2346 #if defined(CONFIG_DRM_AMD_DC_DCN)
2347         if (dc->idle_optimizations_allowed)
2348                 overall_type = UPDATE_TYPE_FULL;
2349
2350 #endif
2351         if (stream_status == NULL || stream_status->plane_count != surface_count)
2352                 overall_type = UPDATE_TYPE_FULL;
2353
2354         if (stream_update && stream_update->pending_test_pattern) {
2355                 overall_type = UPDATE_TYPE_FULL;
2356         }
2357
2358         /* some stream updates require passive update */
2359         if (stream_update) {
2360                 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2361
2362                 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2363                         (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2364                         stream_update->integer_scaling_update)
2365                         su_flags->bits.scaling = 1;
2366
2367                 if (stream_update->out_transfer_func)
2368                         su_flags->bits.out_tf = 1;
2369
2370                 if (stream_update->abm_level)
2371                         su_flags->bits.abm_level = 1;
2372
2373                 if (stream_update->dpms_off)
2374                         su_flags->bits.dpms_off = 1;
2375
2376                 if (stream_update->gamut_remap)
2377                         su_flags->bits.gamut_remap = 1;
2378
2379                 if (stream_update->wb_update)
2380                         su_flags->bits.wb_update = 1;
2381
2382                 if (stream_update->dsc_config)
2383                         su_flags->bits.dsc_changed = 1;
2384
2385 #if defined(CONFIG_DRM_AMD_DC_DCN)
2386                 if (stream_update->mst_bw_update)
2387                         su_flags->bits.mst_bw = 1;
2388 #endif
2389
2390                 if (su_flags->raw != 0)
2391                         overall_type = UPDATE_TYPE_FULL;
2392
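                /* Note that out_csc is set after the su_flags->raw check
                 * above, so an output CSC change on its own does not force
                 * a full update at this point.
                 */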
2393                 if (stream_update->output_csc_transform || stream_update->output_color_space)
2394                         su_flags->bits.out_csc = 1;
2395         }
2396
2397         for (i = 0 ; i < surface_count; i++) {
2398                 enum surface_update_type type =
2399                                 det_surface_update(dc, &updates[i]);
2400
2401                 elevate_update_type(&overall_type, type);
2402         }
2403
2404         return overall_type;
2405 }
2406
2407 /*
2408  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2409  *
2410  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2411  */
2412 enum surface_update_type dc_check_update_surfaces_for_stream(
2413                 struct dc *dc,
2414                 struct dc_surface_update *updates,
2415                 int surface_count,
2416                 struct dc_stream_update *stream_update,
2417                 const struct dc_stream_status *stream_status)
2418 {
2419         int i;
2420         enum surface_update_type type;
2421
2422         if (stream_update)
2423                 stream_update->stream->update_flags.raw = 0;
2424         for (i = 0; i < surface_count; i++)
2425                 updates[i].surface->update_flags.raw = 0;
2426
2427         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2428         if (type == UPDATE_TYPE_FULL) {
2429                 if (stream_update) {
2430                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2431                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2432                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2433                 }
2434                 for (i = 0; i < surface_count; i++)
2435                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2436         }
2437
2438         if (type == UPDATE_TYPE_FAST) {
2439                 // If there's an available clock comparator, we use that.
2440                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2441                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2442                                 dc->optimized_required = true;
2443                 // Else we fallback to mem compare.
2444                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2445                         dc->optimized_required = true;
2446                 }
2447
2448                 dc->optimized_required |= dc->wm_optimized_required;
2449         }
2450
2451         return type;
2452 }
2453
2454 static struct dc_stream_status *stream_get_status(
2455         struct dc_state *ctx,
2456         struct dc_stream_state *stream)
2457 {
2458         uint8_t i;
2459
2460         for (i = 0; i < ctx->stream_count; i++) {
2461                 if (stream == ctx->streams[i]) {
2462                         return &ctx->stream_status[i];
2463                 }
2464         }
2465
2466         return NULL;
2467 }
2468
2469 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2470
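/*
 * copy_surface_update_to_plane() - Copy every non-NULL field of @srf_update
 * into @surface so the plane state reflects the latest flip address,
 * scaling, plane info and color-management settings.
 */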
2471 static void copy_surface_update_to_plane(
2472                 struct dc_plane_state *surface,
2473                 struct dc_surface_update *srf_update)
2474 {
2475         if (srf_update->flip_addr) {
2476                 surface->address = srf_update->flip_addr->address;
2477                 surface->flip_immediate =
2478                         srf_update->flip_addr->flip_immediate;
2479                 surface->time.time_elapsed_in_us[surface->time.index] =
2480                         srf_update->flip_addr->flip_timestamp_in_us -
2481                                 surface->time.prev_update_time_in_us;
2482                 surface->time.prev_update_time_in_us =
2483                         srf_update->flip_addr->flip_timestamp_in_us;
2484                 surface->time.index++;
2485                 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2486                         surface->time.index = 0;
2487
2488                 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2489         }
2490
2491         if (srf_update->scaling_info) {
2492                 surface->scaling_quality =
2493                                 srf_update->scaling_info->scaling_quality;
2494                 surface->dst_rect =
2495                                 srf_update->scaling_info->dst_rect;
2496                 surface->src_rect =
2497                                 srf_update->scaling_info->src_rect;
2498                 surface->clip_rect =
2499                                 srf_update->scaling_info->clip_rect;
2500         }
2501
2502         if (srf_update->plane_info) {
2503                 surface->color_space =
2504                                 srf_update->plane_info->color_space;
2505                 surface->format =
2506                                 srf_update->plane_info->format;
2507                 surface->plane_size =
2508                                 srf_update->plane_info->plane_size;
2509                 surface->rotation =
2510                                 srf_update->plane_info->rotation;
2511                 surface->horizontal_mirror =
2512                                 srf_update->plane_info->horizontal_mirror;
2513                 surface->stereo_format =
2514                                 srf_update->plane_info->stereo_format;
2515                 surface->tiling_info =
2516                                 srf_update->plane_info->tiling_info;
2517                 surface->visible =
2518                                 srf_update->plane_info->visible;
2519                 surface->per_pixel_alpha =
2520                                 srf_update->plane_info->per_pixel_alpha;
2521                 surface->global_alpha =
2522                                 srf_update->plane_info->global_alpha;
2523                 surface->global_alpha_value =
2524                                 srf_update->plane_info->global_alpha_value;
2525                 surface->dcc =
2526                                 srf_update->plane_info->dcc;
2527                 surface->layer_index =
2528                                 srf_update->plane_info->layer_index;
2529         }
2530
2531         if (srf_update->gamma &&
2532                         (surface->gamma_correction !=
2533                                         srf_update->gamma)) {
2534                 memcpy(&surface->gamma_correction->entries,
2535                         &srf_update->gamma->entries,
2536                         sizeof(struct dc_gamma_entries));
2537                 surface->gamma_correction->is_identity =
2538                         srf_update->gamma->is_identity;
2539                 surface->gamma_correction->num_entries =
2540                         srf_update->gamma->num_entries;
2541                 surface->gamma_correction->type =
2542                         srf_update->gamma->type;
2543         }
2544
2545         if (srf_update->in_transfer_func &&
2546                         (surface->in_transfer_func !=
2547                                 srf_update->in_transfer_func)) {
2548                 surface->in_transfer_func->sdr_ref_white_level =
2549                         srf_update->in_transfer_func->sdr_ref_white_level;
2550                 surface->in_transfer_func->tf =
2551                         srf_update->in_transfer_func->tf;
2552                 surface->in_transfer_func->type =
2553                         srf_update->in_transfer_func->type;
2554                 memcpy(&surface->in_transfer_func->tf_pts,
2555                         &srf_update->in_transfer_func->tf_pts,
2556                         sizeof(struct dc_transfer_func_distributed_points));
2557         }
2558
2559         if (srf_update->func_shaper &&
2560                         (surface->in_shaper_func !=
2561                         srf_update->func_shaper))
2562                 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2563                 sizeof(*surface->in_shaper_func));
2564
2565         if (srf_update->lut3d_func &&
2566                         (surface->lut3d_func !=
2567                         srf_update->lut3d_func))
2568                 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2569                 sizeof(*surface->lut3d_func));
2570
2571         if (srf_update->hdr_mult.value)
2572                 surface->hdr_mult =
2573                                 srf_update->hdr_mult;
2574
2575         if (srf_update->blend_tf &&
2576                         (surface->blend_tf !=
2577                         srf_update->blend_tf))
2578                 memcpy(surface->blend_tf, srf_update->blend_tf,
2579                 sizeof(*surface->blend_tf));
2580
2581         if (srf_update->input_csc_color_matrix)
2582                 surface->input_csc_color_matrix =
2583                         *srf_update->input_csc_color_matrix;
2584
2585         if (srf_update->coeff_reduction_factor)
2586                 surface->coeff_reduction_factor =
2587                         *srf_update->coeff_reduction_factor;
2588
2589         if (srf_update->gamut_remap_matrix)
2590                 surface->gamut_remap_matrix =
2591                         *srf_update->gamut_remap_matrix;
2592 }
2593
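/*
 * copy_stream_update_to_stream() - Copy every non-NULL field of @update into
 * @stream. A new DSC config is validated against a temporary copy of the
 * current state first and dropped (update->dsc_config NULLed) if it fails
 * bandwidth validation.
 */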
2594 static void copy_stream_update_to_stream(struct dc *dc,
2595                                          struct dc_state *context,
2596                                          struct dc_stream_state *stream,
2597                                          struct dc_stream_update *update)
2598 {
2599         struct dc_context *dc_ctx = dc->ctx;
2600
2601         if (update == NULL || stream == NULL)
2602                 return;
2603
2604         if (update->src.height && update->src.width)
2605                 stream->src = update->src;
2606
2607         if (update->dst.height && update->dst.width)
2608                 stream->dst = update->dst;
2609
2610         if (update->out_transfer_func &&
2611             stream->out_transfer_func != update->out_transfer_func) {
2612                 stream->out_transfer_func->sdr_ref_white_level =
2613                         update->out_transfer_func->sdr_ref_white_level;
2614                 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2615                 stream->out_transfer_func->type =
2616                         update->out_transfer_func->type;
2617                 memcpy(&stream->out_transfer_func->tf_pts,
2618                        &update->out_transfer_func->tf_pts,
2619                        sizeof(struct dc_transfer_func_distributed_points));
2620         }
2621
2622         if (update->hdr_static_metadata)
2623                 stream->hdr_static_metadata = *update->hdr_static_metadata;
2624
2625         if (update->abm_level)
2626                 stream->abm_level = *update->abm_level;
2627
2628         if (update->periodic_interrupt0)
2629                 stream->periodic_interrupt0 = *update->periodic_interrupt0;
2630
2631         if (update->periodic_interrupt1)
2632                 stream->periodic_interrupt1 = *update->periodic_interrupt1;
2633
2634         if (update->gamut_remap)
2635                 stream->gamut_remap_matrix = *update->gamut_remap;
2636
2637         /* Note: this being updated after mode set is currently not a use case;
2638          * however, if it arises, OCSC would need to be reprogrammed at the
2639          * minimum.
2640          */
2641         if (update->output_color_space)
2642                 stream->output_color_space = *update->output_color_space;
2643
2644         if (update->output_csc_transform)
2645                 stream->csc_color_matrix = *update->output_csc_transform;
2646
2647         if (update->vrr_infopacket)
2648                 stream->vrr_infopacket = *update->vrr_infopacket;
2649
2650         if (update->dpms_off)
2651                 stream->dpms_off = *update->dpms_off;
2652
2653         if (update->vsc_infopacket)
2654                 stream->vsc_infopacket = *update->vsc_infopacket;
2655
2656         if (update->vsp_infopacket)
2657                 stream->vsp_infopacket = *update->vsp_infopacket;
2658
2659         if (update->dither_option)
2660                 stream->dither_option = *update->dither_option;
2661
2662         if (update->pending_test_pattern)
2663                 stream->test_pattern = *update->pending_test_pattern;
2664         /* update current stream with writeback info */
2665         if (update->wb_update) {
2666                 int i;
2667
2668                 stream->num_wb_info = update->wb_update->num_wb_info;
2669                 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2670                 for (i = 0; i < stream->num_wb_info; i++)
2671                         stream->writeback_info[i] =
2672                                 update->wb_update->writeback_info[i];
2673         }
2674         if (update->dsc_config) {
2675                 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2676                 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2677                 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2678                                        update->dsc_config->num_slices_v != 0);
2679
2680                 /* Use a temporary context for validating the new DSC config */
2681                 struct dc_state *dsc_validate_context = dc_create_state(dc);
2682
2683                 if (dsc_validate_context) {
2684                         dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2685
2686                         stream->timing.dsc_cfg = *update->dsc_config;
2687                         stream->timing.flags.DSC = enable_dsc;
2688                         if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2689                                 stream->timing.dsc_cfg = old_dsc_cfg;
2690                                 stream->timing.flags.DSC = old_dsc_enabled;
2691                                 update->dsc_config = NULL;
2692                         }
2693
2694                         dc_release_state(dsc_validate_context);
2695                 } else {
2696                         DC_ERROR("Failed to allocate new validate context for DSC change\n");
2697                         update->dsc_config = NULL;
2698                 }
2699         }
2700 }
2701
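/*
 * commit_planes_do_stream_update() - Apply stream-level updates (periodic
 * interrupts, info frames, gamut remap, output CSC, dither, DSC, MST
 * bandwidth, test patterns, DPMS) on each top-level pipe driving @stream.
 * Items after the UPDATE_TYPE_FAST check only run for MED/FULL updates.
 */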
2702 static void commit_planes_do_stream_update(struct dc *dc,
2703                 struct dc_stream_state *stream,
2704                 struct dc_stream_update *stream_update,
2705                 enum surface_update_type update_type,
2706                 struct dc_state *context)
2707 {
2708         int j;
2709
2710         // Stream updates
2711         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2712                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2713
2714                 if (!pipe_ctx->top_pipe &&  !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2715
2716                         if (stream_update->periodic_interrupt0 &&
2717                                         dc->hwss.setup_periodic_interrupt)
2718                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
2719
2720                         if (stream_update->periodic_interrupt1 &&
2721                                         dc->hwss.setup_periodic_interrupt)
2722                                 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
2723
2724                         if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
2725                                         stream_update->vrr_infopacket ||
2726                                         stream_update->vsc_infopacket ||
2727                                         stream_update->vsp_infopacket) {
2728                                 resource_build_info_frame(pipe_ctx);
2729                                 dc->hwss.update_info_frame(pipe_ctx);
2730                         }
2731
2732                         if (stream_update->hdr_static_metadata &&
2733                                         stream->use_dynamic_meta &&
2734                                         dc->hwss.set_dmdata_attributes &&
2735                                         pipe_ctx->stream->dmdata_address.quad_part != 0)
2736                                 dc->hwss.set_dmdata_attributes(pipe_ctx);
2737
2738                         if (stream_update->gamut_remap)
2739                                 dc_stream_set_gamut_remap(dc, stream);
2740
2741                         if (stream_update->output_csc_transform)
2742                                 dc_stream_program_csc_matrix(dc, stream);
2743
2744                         if (stream_update->dither_option) {
2745                                 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
2746                                 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
2747                                                                         &pipe_ctx->stream->bit_depth_params);
2748                                 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
2749                                                 &stream->bit_depth_params,
2750                                                 &stream->clamping);
2751                                 while (odm_pipe) {
2752                                         odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
2753                                                         &stream->bit_depth_params,
2754                                                         &stream->clamping);
2755                                         odm_pipe = odm_pipe->next_odm_pipe;
2756                                 }
2757                         }
2758
                        /* Remaining updates apply to full front end updates only */
2761                         if (update_type == UPDATE_TYPE_FAST)
2762                                 continue;
2763
2764                         if (stream_update->dsc_config)
2765                                 dp_update_dsc_config(pipe_ctx);
2766
2767 #if defined(CONFIG_DRM_AMD_DC_DCN)
2768                         if (stream_update->mst_bw_update) {
2769                                 if (stream_update->mst_bw_update->is_increase)
2770                                         dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
2771                                 else
2772                                         dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
2773                         }
2774 #endif
2775
2776                         if (stream_update->pending_test_pattern) {
2777                                 dc_link_dp_set_test_pattern(stream->link,
2778                                         stream->test_pattern.type,
2779                                         stream->test_pattern.color_space,
2780                                         stream->test_pattern.p_link_settings,
2781                                         stream->test_pattern.p_custom_pattern,
2782                                         stream->test_pattern.cust_pattern_size);
2783                         }
2784
2785                         if (stream_update->dpms_off) {
2786                                 if (*stream_update->dpms_off) {
2787                                         core_link_disable_stream(pipe_ctx);
                                        /* for dpms, keep acquired resources */
2789                                         if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
2790                                                 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
2791
2792                                         dc->optimized_required = true;
2793
2794                                 } else {
2795                                         if (get_seamless_boot_stream_count(context) == 0)
2796                                                 dc->hwss.prepare_bandwidth(dc, dc->current_state);
2797
2798                                         core_link_enable_stream(dc->current_state, pipe_ctx);
2799                                 }
2800                         }
2801
2802                         if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
2803                                 bool should_program_abm = true;
2804
                                // if otg funcs are defined, check if blanked before programming
2806                                 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
2807                                         if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
2808                                                 should_program_abm = false;
2809
2810                                 if (should_program_abm) {
2811                                         if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
2812                                                 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2813                                         } else {
2814                                                 pipe_ctx->stream_res.abm->funcs->set_abm_level(
2815                                                         pipe_ctx->stream_res.abm, stream->abm_level);
2816                                         }
2817                                 }
2818                         }
2819                 }
2820         }
2821 }
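
/*
 * Example: the dpms_off handling above is driven from a stream update built
 * by the DM layer. A minimal sketch of turning a stream off, assuming a
 * valid dc and stream from the caller's context:
 *
 *      bool dpms_off = true;
 *      struct dc_stream_update stream_update = {0};
 *
 *      stream_update.dpms_off = &dpms_off;
 *      dc_commit_updates_for_stream(dc, NULL, 0, stream, &stream_update,
 *                      dc->current_state);
 */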
2822
2823 static void commit_planes_for_stream(struct dc *dc,
2824                 struct dc_surface_update *srf_updates,
2825                 int surface_count,
2826                 struct dc_stream_state *stream,
2827                 struct dc_stream_update *stream_update,
2828                 enum surface_update_type update_type,
2829                 struct dc_state *context)
2830 {
2831         int i, j;
2832         struct pipe_ctx *top_pipe_to_program = NULL;
2833         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
2834
2835 #if defined(CONFIG_DRM_AMD_DC_DCN)
2836         dc_z10_restore(dc);
2837 #endif
2838
2839         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
                /* The seamless boot optimization flag keeps clocks and watermarks
                 * high until the first flip. After the first flip, optimization is
                 * required to lower bandwidth. Note that UEFI is expected to light
                 * up only a single display on POST, therefore we only expect one
                 * stream with the seamless boot flag set.
                 */
2846                 if (stream->apply_seamless_boot_optimization) {
2847                         stream->apply_seamless_boot_optimization = false;
2848
2849                         if (get_seamless_boot_stream_count(context) == 0)
2850                                 dc->optimized_required = true;
2851                 }
2852         }
2853
2854         if (update_type == UPDATE_TYPE_FULL) {
2855 #if defined(CONFIG_DRM_AMD_DC_DCN)
2856                 dc_allow_idle_optimizations(dc, false);
2857
2858 #endif
2859                 if (get_seamless_boot_stream_count(context) == 0)
2860                         dc->hwss.prepare_bandwidth(dc, context);
2861
2862                 context_clock_trace(dc, context);
2863         }
2864
2865         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2866                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2867
2868                 if (!pipe_ctx->top_pipe &&
2869                         !pipe_ctx->prev_odm_pipe &&
2870                         pipe_ctx->stream &&
2871                         pipe_ctx->stream == stream) {
2872                         top_pipe_to_program = pipe_ctx;
2873                 }
2874         }
2875
2876 #ifdef CONFIG_DRM_AMD_DC_DCN
2877         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2878                 struct pipe_ctx *mpcc_pipe;
2879                 struct pipe_ctx *odm_pipe;
2880
2881                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2882                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2883                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2884         }
2885 #endif
2886
2887         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2888                 if (top_pipe_to_program &&
2889                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2890                         if (should_use_dmub_lock(stream->link)) {
2891                                 union dmub_hw_lock_flags hw_locks = { 0 };
2892                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2893
2894                                 hw_locks.bits.lock_dig = 1;
2895                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2896
2897                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2898                                                         true,
2899                                                         &hw_locks,
2900                                                         &inst_flags);
2901                         } else
2902                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2903                                                 top_pipe_to_program->stream_res.tg);
2904                 }
2905
2906         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2907                 dc->hwss.interdependent_update_lock(dc, context, true);
2908         else
2909                 /* Lock the top pipe while updating plane addrs, since freesync requires
2910                  *  plane addr update event triggers to be synchronized.
2911                  *  top_pipe_to_program is expected to never be NULL
2912                  */
2913                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2914
2915         // Stream updates
2916         if (stream_update)
2917                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2918
2919         if (surface_count == 0) {
                /*
                 * When turning off the screen there is no need to program the front
                 * end a second time; just return after programming blank.
                 */
2924                 if (dc->hwss.apply_ctx_for_surface)
2925                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2926                 if (dc->hwss.program_front_end_for_ctx)
2927                         dc->hwss.program_front_end_for_ctx(dc, context);
2928
2929                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2930                         dc->hwss.interdependent_update_lock(dc, context, false);
2931                 else
2932                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2933                 dc->hwss.post_unlock_program_front_end(dc, context);
2934                 return;
2935         }
2936
2937         if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2938                 for (i = 0; i < surface_count; i++) {
2939                         struct dc_plane_state *plane_state = srf_updates[i].surface;
                        /* set logical flag for lock/unlock use */
2941                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2942                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2943                                 if (!pipe_ctx->plane_state)
2944                                         continue;
                                if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
                                        continue;
2947                                 pipe_ctx->plane_state->triplebuffer_flips = false;
                                if (update_type == UPDATE_TYPE_FAST &&
                                        dc->hwss.program_triplebuffer != NULL &&
                                        !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
                                        /* triple buffer for VUpdate only */
                                        pipe_ctx->plane_state->triplebuffer_flips = true;
                                }
2954                         }
2955                         if (update_type == UPDATE_TYPE_FULL) {
2956                                 /* force vsync flip when reconfiguring pipes to prevent underflow */
2957                                 plane_state->flip_immediate = false;
2958                         }
2959                 }
2960         }
2961
2962         // Update Type FULL, Surface updates
2963         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2964                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2965
2966                 if (!pipe_ctx->top_pipe &&
2967                         !pipe_ctx->prev_odm_pipe &&
2968                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
2969                         struct dc_stream_status *stream_status = NULL;
2970
2971                         if (!pipe_ctx->plane_state)
2972                                 continue;
2973
                        /* Remaining updates apply to full front end updates only */
2975                         if (update_type == UPDATE_TYPE_FAST)
2976                                 continue;
2977
2978                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2979
2980                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
                                /* turn off triple buffer for full update */
2982                                 dc->hwss.program_triplebuffer(
2983                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2984                         }
2985                         stream_status =
2986                                 stream_get_status(context, pipe_ctx->stream);
2987
2988                         if (dc->hwss.apply_ctx_for_surface)
2989                                 dc->hwss.apply_ctx_for_surface(
2990                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
2991                 }
2992         }
2993         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2994                 dc->hwss.program_front_end_for_ctx(dc, context);
2995 #ifdef CONFIG_DRM_AMD_DC_DCN
2996                 if (dc->debug.validate_dml_output) {
2997                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
2998                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
2999                                 if (cur_pipe->stream == NULL)
3000                                         continue;
3001
3002                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3003                                                 cur_pipe->plane_res.hubp, dc->ctx,
3004                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3005                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3006                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3007                         }
3008                 }
3009 #endif
3010         }
3011
3012         // Update Type FAST, Surface updates
3013         if (update_type == UPDATE_TYPE_FAST) {
3014                 if (dc->hwss.set_flip_control_gsl)
3015                         for (i = 0; i < surface_count; i++) {
3016                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3017
3018                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3019                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3020
3021                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3022                                                 continue;
3023
3024                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3025                                                 continue;
3026
3027                                         // GSL has to be used for flip immediate
3028                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3029                                                         pipe_ctx->plane_state->flip_immediate);
3030                                 }
3031                         }
3032
3033                 /* Perform requested Updates */
3034                 for (i = 0; i < surface_count; i++) {
3035                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3036
3037                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3038                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3039
3040                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3041                                         continue;
3042
3043                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3044                                         continue;
3045
                                /* program triple buffer after lock, based on flip type */
                                if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
                                        /* only enable triple buffering for fast updates */
3049                                         dc->hwss.program_triplebuffer(
3050                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3051                                 }
3052                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3053                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3054                         }
3055                 }
3056
3057         }
3058
3059         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
3060                 dc->hwss.interdependent_update_lock(dc, context, false);
3061         else
3062                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3063
        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                if (top_pipe_to_program &&
                        top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3066                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3067                                         top_pipe_to_program->stream_res.tg,
3068                                         CRTC_STATE_VACTIVE);
3069                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3070                                         top_pipe_to_program->stream_res.tg,
3071                                         CRTC_STATE_VBLANK);
3072                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3073                                         top_pipe_to_program->stream_res.tg,
3074                                         CRTC_STATE_VACTIVE);
3075
3076                         if (stream && should_use_dmub_lock(stream->link)) {
3077                                 union dmub_hw_lock_flags hw_locks = { 0 };
3078                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3079
3080                                 hw_locks.bits.lock_dig = 1;
3081                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3082
3083                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3084                                                         false,
3085                                                         &hw_locks,
3086                                                         &inst_flags);
3087                         } else
3088                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3089                                         top_pipe_to_program->stream_res.tg);
3090                 }
3091
3092         if (update_type != UPDATE_TYPE_FAST)
3093                 dc->hwss.post_unlock_program_front_end(dc, context);
3094
3095         // Fire manual trigger only when bottom plane is flipped
3096         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3097                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3098
3099                 if (!pipe_ctx->plane_state)
3100                         continue;
3101
3102                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3103                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3104                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3105                                 pipe_ctx->plane_state->skip_manual_trigger)
3106                         continue;
3107
3108                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3109                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3110         }
3111 }
3112
3113 void dc_commit_updates_for_stream(struct dc *dc,
3114                 struct dc_surface_update *srf_updates,
3115                 int surface_count,
3116                 struct dc_stream_state *stream,
3117                 struct dc_stream_update *stream_update,
3118                 struct dc_state *state)
3119 {
3120         const struct dc_stream_status *stream_status;
3121         enum surface_update_type update_type;
3122         struct dc_state *context;
3123         struct dc_context *dc_ctx = dc->ctx;
3124         int i, j;
3125
3126         stream_status = dc_stream_get_status(stream);
3127         context = dc->current_state;
3128
3129         update_type = dc_check_update_surfaces_for_stream(
3130                                 dc, srf_updates, surface_count, stream_update, stream_status);
3131
3132         if (update_type >= update_surface_trace_level)
3133                 update_surface_trace(dc, srf_updates, surface_count);
3134
3136         if (update_type >= UPDATE_TYPE_FULL) {
3137
3138                 /* initialize scratch memory for building context */
3139                 context = dc_create_state(dc);
3140                 if (context == NULL) {
3141                         DC_ERROR("Failed to allocate new validate context!\n");
3142                         return;
3143                 }
3144
3145                 dc_resource_state_copy_construct(state, context);
3146
3147                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3148                         struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3149                         struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3150
3151                         if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3152                                 new_pipe->plane_state->force_full_update = true;
3153                 }
3154         } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3155                 /*
3156                  * Previous frame finished and HW is ready for optimization.
3157                  *
3158                  * Only relevant for DCN behavior where we can guarantee the optimization
3159                  * is safe to apply - retain the legacy behavior for DCE.
3160                  */
3161                 dc_post_update_surfaces_to_stream(dc);
3162         }
3163
3165         for (i = 0; i < surface_count; i++) {
3166                 struct dc_plane_state *surface = srf_updates[i].surface;
3167
3168                 copy_surface_update_to_plane(surface, &srf_updates[i]);
3169
3170                 if (update_type >= UPDATE_TYPE_MED) {
3171                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3172                                 struct pipe_ctx *pipe_ctx =
3173                                         &context->res_ctx.pipe_ctx[j];
3174
3175                                 if (pipe_ctx->plane_state != surface)
3176                                         continue;
3177
3178                                 resource_build_scaling_params(pipe_ctx);
3179                         }
3180                 }
3181         }
3182
3183         copy_stream_update_to_stream(dc, context, stream, stream_update);
3184
3185         if (update_type >= UPDATE_TYPE_FULL) {
3186                 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3187                         DC_ERROR("Mode validation failed for stream update!\n");
3188                         dc_release_state(context);
3189                         return;
3190                 }
3191         }
3192
3193         TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3194
3195         commit_planes_for_stream(
3196                                 dc,
3197                                 srf_updates,
3198                                 surface_count,
3199                                 stream,
3200                                 stream_update,
3201                                 update_type,
3202                                 context);
        /* update current_state */
3204         if (dc->current_state != context) {
3205
3206                 struct dc_state *old = dc->current_state;
3207
3208                 dc->current_state = context;
3209                 dc_release_state(old);
3210
3211                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3212                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3213
3214                         if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3215                                 pipe_ctx->plane_state->force_full_update = false;
3216                 }
3217         }
3218
3219         /* Legacy optimization path for DCE. */
3220         if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3221                 dc_post_update_surfaces_to_stream(dc);
3222                 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3223         }
3227 }
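
/*
 * Example: a page flip is the typical UPDATE_TYPE_FAST caller of
 * dc_commit_updates_for_stream(). A minimal sketch, assuming the
 * dc_surface_update::flip_addr member used by the DM flip path; since only
 * the address changes, dc_check_update_surfaces_for_stream() classifies
 * this as a fast update:
 *
 *      struct dc_surface_update srf_update = {0};
 *
 *      srf_update.surface = plane_state;
 *      srf_update.flip_addr = &flip_addr;      // new framebuffer address
 *      dc_commit_updates_for_stream(dc, &srf_update, 1, stream, NULL,
 *                      dc->current_state);
 */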
3228
3229 uint8_t dc_get_current_stream_count(struct dc *dc)
3230 {
3231         return dc->current_state->stream_count;
3232 }
3233
3234 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3235 {
3236         if (i < dc->current_state->stream_count)
3237                 return dc->current_state->streams[i];
3238         return NULL;
3239 }
3240
3241 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
3242 {
3243         uint8_t i;
3244         struct dc_context *ctx = link->ctx;
3245
3246         for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
3247                 if (ctx->dc->current_state->streams[i]->link == link)
3248                         return ctx->dc->current_state->streams[i];
3249         }
3250
3251         return NULL;
3252 }
3253
3254 enum dc_irq_source dc_interrupt_to_irq_source(
3255                 struct dc *dc,
3256                 uint32_t src_id,
3257                 uint32_t ext_id)
3258 {
3259         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3260 }
3261
3262 /*
3263  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3264  */
3265 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3266 {
3268         if (dc == NULL)
3269                 return false;
3270
3271         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3272 }
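
/*
 * Example: a typical interrupt setup pairs dc_interrupt_to_irq_source()
 * with dc_interrupt_set(). A minimal sketch, where src_id/ext_id come from
 * the caller's IRQ registration:
 *
 *      enum dc_irq_source src = dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *      if (!dc_interrupt_set(dc, src, true))
 *              return;         // dc was NULL or the source could not be set
 */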
3273
3274 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3275 {
3276         dal_irq_service_ack(dc->res_pool->irqs, src);
3277 }
3278
3279 void dc_power_down_on_boot(struct dc *dc)
3280 {
3281         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3282                         dc->hwss.power_down_on_boot)
3283                 dc->hwss.power_down_on_boot(dc);
3284 }
3285
3286 void dc_set_power_state(
3287         struct dc *dc,
3288         enum dc_acpi_cm_power_state power_state)
3289 {
3290         struct kref refcount;
3291         struct display_mode_lib *dml;
3292
3293         if (!dc->current_state)
3294                 return;
3295
3296         switch (power_state) {
3297         case DC_ACPI_CM_POWER_STATE_D0:
3298                 dc_resource_state_construct(dc, dc->current_state);
3299
3300 #if defined(CONFIG_DRM_AMD_DC_DCN)
3301                 dc_z10_restore(dc);
3302 #endif
3303                 if (dc->ctx->dmub_srv)
3304                         dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3305
3306                 dc->hwss.init_hw(dc);
3307
3308                 if (dc->hwss.init_sys_ctx != NULL &&
3309                         dc->vm_pa_config.valid) {
3310                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3311                 }
3312
3313                 break;
3314         default:
3315                 ASSERT(dc->current_state->stream_count == 0);
3316                 /* Zero out the current context so that on resume we start with
3317                  * clean state, and dc hw programming optimizations will not
3318                  * cause any trouble.
3319                  */
3320                 dml = kzalloc(sizeof(struct display_mode_lib),
3321                                 GFP_KERNEL);
3322
3323                 ASSERT(dml);
3324                 if (!dml)
3325                         return;
3326
3327                 /* Preserve refcount */
3328                 refcount = dc->current_state->refcount;
3329                 /* Preserve display mode lib */
3330                 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3331
3332                 dc_resource_state_destruct(dc->current_state);
3333                 memset(dc->current_state, 0,
3334                                 sizeof(*dc->current_state));
3335
3336                 dc->current_state->refcount = refcount;
3337                 dc->current_state->bw_ctx.dml = *dml;
3338
3339                 kfree(dml);
3340
3341                 break;
3342         }
3343 }
3344
3345 void dc_resume(struct dc *dc)
3346 {
3347         uint32_t i;
3348
3349         for (i = 0; i < dc->link_count; i++)
3350                 core_link_resume(dc->links[i]);
3351 }
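
/*
 * Example: dc_set_power_state() and dc_resume() are driven by the DM layer
 * across suspend/resume. A minimal sketch of the expected ordering, assuming
 * the DM has already quiesced new commits:
 *
 *      // suspend path
 *      dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *      // resume path
 *      dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *      dc_resume(dc);
 */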
3352
3353 bool dc_is_dmcu_initialized(struct dc *dc)
3354 {
3355         struct dmcu *dmcu = dc->res_pool->dmcu;
3356
3357         if (dmcu)
3358                 return dmcu->funcs->is_dmcu_initialized(dmcu);
3359         return false;
3360 }
3361
3362 bool dc_submit_i2c(
3363                 struct dc *dc,
3364                 uint32_t link_index,
3365                 struct i2c_command *cmd)
3366 {
        struct dc_link *link = dc->links[link_index];
        struct ddc_service *ddc = link->ddc;

        return dce_i2c_submit_command(
3371                 dc->res_pool,
3372                 ddc->ddc_pin,
3373                 cmd);
3374 }
3375
3376 bool dc_submit_i2c_oem(
3377                 struct dc *dc,
3378                 struct i2c_command *cmd)
3379 {
3380         struct ddc_service *ddc = dc->res_pool->oem_device;
3381         return dce_i2c_submit_command(
3382                 dc->res_pool,
3383                 ddc->ddc_pin,
3384                 cmd);
3385 }
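
/*
 * Example: a minimal sketch of an OEM I2C write, modeled on how the DM's
 * i2c transfer hook builds its command; the i2c_command/i2c_payload field
 * names below are assumptions based on that usage:
 *
 *      struct i2c_payload payload = {
 *              .write = true,
 *              .address = slave_addr,  // caller-supplied 7-bit address
 *              .length = len,
 *              .data = buf,
 *      };
 *      struct i2c_command cmd = {
 *              .payloads = &payload,
 *              .number_of_payloads = 1,
 *              .engine = I2C_COMMAND_ENGINE_DEFAULT,
 *              .speed = 100,   // kHz
 *      };
 *
 *      if (!dc_submit_i2c_oem(dc, &cmd))
 *              return;         // transfer failed
 */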
3386
3387 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3388 {
3389         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3390                 BREAK_TO_DEBUGGER();
3391                 return false;
3392         }
3393
3394         dc_sink_retain(sink);
3395
3396         dc_link->remote_sinks[dc_link->sink_count] = sink;
3397         dc_link->sink_count++;
3398
3399         return true;
3400 }
3401
3402 /*
3403  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3404  *
3405  * EDID length is in bytes
3406  */
3407 struct dc_sink *dc_link_add_remote_sink(
3408                 struct dc_link *link,
3409                 const uint8_t *edid,
3410                 int len,
3411                 struct dc_sink_init_data *init_data)
3412 {
3413         struct dc_sink *dc_sink;
3414         enum dc_edid_status edid_status;
3415
3416         if (len > DC_MAX_EDID_BUFFER_SIZE) {
3417                 dm_error("Max EDID buffer size breached!\n");
3418                 return NULL;
3419         }
3420
3421         if (!init_data) {
3422                 BREAK_TO_DEBUGGER();
3423                 return NULL;
3424         }
3425
3426         if (!init_data->link) {
3427                 BREAK_TO_DEBUGGER();
3428                 return NULL;
3429         }
3430
3431         dc_sink = dc_sink_create(init_data);
3432
3433         if (!dc_sink)
3434                 return NULL;
3435
3436         memmove(dc_sink->dc_edid.raw_edid, edid, len);
3437         dc_sink->dc_edid.length = len;
3438
3439         if (!link_add_remote_sink_helper(
3440                         link,
3441                         dc_sink))
3442                 goto fail_add_sink;
3443
3444         edid_status = dm_helpers_parse_edid_caps(
3445                         link,
3446                         &dc_sink->dc_edid,
3447                         &dc_sink->edid_caps);
3448
        /*
         * Treat the device as having no EDID if EDID parsing fails
         */
        if (edid_status != EDID_OK) {
                dc_sink->dc_edid.length = 0;
                dm_error("Bad EDID, status %d!\n", edid_status);
        }
3457
3458         return dc_sink;
3459
3460 fail_add_sink:
3461         dc_sink_release(dc_sink);
3462         return NULL;
3463 }
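
/*
 * Example: the MST topology code is the typical caller. A minimal sketch of
 * adding a remote sink for an MST display, with init data fields mirroring
 * the dc_sink_init_data usage in the DM MST code:
 *
 *      struct dc_sink_init_data init_data = {
 *              .link = link,
 *              .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *      };
 *      struct dc_sink *sink =
 *              dc_link_add_remote_sink(link, edid_buf, edid_len, &init_data);
 *
 *      if (!sink)
 *              return;         // EDID too large, bad init data, or OOM
 */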
3464
3465 /*
3466  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3467  *
3468  * Note that this just removes the struct dc_sink - it doesn't
3469  * program hardware or alter other members of dc_link
3470  */
3471 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3472 {
3473         int i;
3474
3475         if (!link->sink_count) {
3476                 BREAK_TO_DEBUGGER();
3477                 return;
3478         }
3479
3480         for (i = 0; i < link->sink_count; i++) {
3481                 if (link->remote_sinks[i] == sink) {
3482                         dc_sink_release(sink);
3483                         link->remote_sinks[i] = NULL;
3484
                        /* shrink array to remove the empty slot */
3486                         while (i < link->sink_count - 1) {
3487                                 link->remote_sinks[i] = link->remote_sinks[i+1];
3488                                 i++;
3489                         }
3490                         link->remote_sinks[i] = NULL;
3491                         link->sink_count--;
3492                         return;
3493                 }
3494         }
3495 }
3496
3497 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3498 {
3499         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3500         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3501         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3502         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3503         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3504         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3505         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3506         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3507         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3508 }

enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3510 {
3511         if (dc->hwss.set_clock)
3512                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3513         return DC_ERROR_UNEXPECTED;
3514 }

void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3516 {
3517         if (dc->hwss.get_clock)
3518                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
3519 }
3520
/* enable/disable eDP PSR without specifying a stream for eDP */
3522 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3523 {
3524         int i;
3525         bool allow_active;
3526
        for (i = 0; i < dc->current_state->stream_count; i++) {
3528                 struct dc_link *link;
3529                 struct dc_stream_state *stream = dc->current_state->streams[i];
3530
3531                 link = stream->link;
3532                 if (!link)
3533                         continue;
3534
3535                 if (link->psr_settings.psr_feature_enabled) {
3536                         if (enable && !link->psr_settings.psr_allow_active) {
3537                                 allow_active = true;
3538                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
3539                                         return false;
3540                         } else if (!enable && link->psr_settings.psr_allow_active) {
3541                                 allow_active = false;
3542                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
3543                                         return false;
3544                         }
3545                 }
3546         }
3547
3548         return true;
3549 }
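
/*
 * Example: the DM layer can use this to force PSR inactive around an
 * operation that needs the full display pipe awake. A sketch; the bracketed
 * operation is assumed:
 *
 *      dc_set_psr_allow_active(dc, false);     // exit PSR on all eDP links
 *      // ... PSR-sensitive operation ...
 *      dc_set_psr_allow_active(dc, true);      // allow PSR again
 */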
3550
3551 #if defined(CONFIG_DRM_AMD_DC_DCN)
3552
3553 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3554 {
3555         if (dc->debug.disable_idle_power_optimizations)
3556                 return;
3557
3558         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
3559                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3560                         return;
3561
3562         if (allow == dc->idle_optimizations_allowed)
3563                 return;
3564
3565         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3566                 dc->idle_optimizations_allowed = allow;
3567 }
3568
3569 /*
3570  * blank all streams, and set min and max memory clock to
3571  * lowest and highest DPM level, respectively
3572  */
3573 void dc_unlock_memory_clock_frequency(struct dc *dc)
3574 {
3575         unsigned int i;
3576
3577         for (i = 0; i < MAX_PIPES; i++)
3578                 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3579                         core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3580
3581         dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3582         dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3583 }
3584
3585 /*
3586  * set min memory clock to the min required for current mode,
3587  * max to maxDPM, and unblank streams
3588  */
3589 void dc_lock_memory_clock_frequency(struct dc *dc)
3590 {
3591         unsigned int i;
3592
3593         dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3594         dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3595         dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3596
3597         for (i = 0; i < MAX_PIPES; i++)
3598                 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3599                         core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3600 }
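
/*
 * Example: the two helpers above are intended to bracket an operation that
 * needs the full memory clock DPM range, with displays blanked in between.
 * A sketch; the bracketed operation is assumed:
 *
 *      dc_unlock_memory_clock_frequency(dc);   // blank, lowest..highest DPM
 *      // ... operation requiring unrestricted memclk switching ...
 *      dc_lock_memory_clock_frequency(dc);     // restore mode minimum, unblank
 */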
3601
3602 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
3603 {
3604         struct dc_state *context = dc->current_state;
3605         struct hubp *hubp;
3606         struct pipe_ctx *pipe;
3607         int i;
3608
3609         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3610                 pipe = &context->res_ctx.pipe_ctx[i];
3611
3612                 if (pipe->stream != NULL) {
3613                         dc->hwss.disable_pixel_data(dc, pipe, true);
3614
3615                         // wait for double buffer
3616                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
3617                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
3618                         pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
3619
3620                         hubp = pipe->plane_res.hubp;
3621                         hubp->funcs->set_blank_regs(hubp, true);
3622                 }
3623         }
3624
3625         dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
3626         dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
3627
3628         for (i = 0; i < dc->res_pool->pipe_count; i++) {
3629                 pipe = &context->res_ctx.pipe_ctx[i];
3630
3631                 if (pipe->stream != NULL) {
3632                         dc->hwss.disable_pixel_data(dc, pipe, false);
3633
3634                         hubp = pipe->plane_res.hubp;
3635                         hubp->funcs->set_blank_regs(hubp, false);
3636                 }
3637         }
3638 }
3639
3641 /**
3642  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
3643  * @dc: pointer to dc of the dm calling this
3644  * @enable: True = transition to DC mode, false = transition back to AC mode
3645  *
 * Some SoCs define additional clock limits when in DC mode; DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through DM interface, or manually by escape calls.
 * The caller is expected to hold the dc lock when doing so.
3653  *
3654  * Return: none (void function)
3655  *
3656  */
3657 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
3658 {
3659         uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
3660         unsigned int softMax, maxDPM, funcMin;
3661         bool p_state_change_support;
3662
3663         if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
3664                 return;
3665
3666         softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
3667         maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
3668         funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
3669         p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
3670
3671         if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
3672                 if (p_state_change_support) {
3673                         if (funcMin <= softMax)
3674                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
3675                         // else: No-Op
3676                 } else {
3677                         if (funcMin <= softMax)
3678                                 blank_and_force_memclk(dc, true, softMax);
3679                         // else: No-Op
3680                 }
3681         } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
3682                 if (p_state_change_support) {
3683                         if (funcMin <= softMax)
3684                                 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
3685                         // else: No-Op
3686                 } else {
3687                         if (funcMin <= softMax)
3688                                 blank_and_force_memclk(dc, true, maxDPM);
3689                         // else: No-Op
3690                 }
3691         }
3692         dc->clk_mgr->dc_mode_softmax_enabled = enable;
3693 }
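
/*
 * Example: a sketch of the power-source notification described above; the
 * AC/DC event plumbing belongs to the DM and is assumed here:
 *
 *      // platform switched from AC to battery
 *      dc_enable_dcmode_clk_limit(dc, true);
 *
 *      // platform switched back to AC
 *      dc_enable_dcmode_clk_limit(dc, false);
 */
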
3694 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3695                 struct dc_cursor_attributes *cursor_attr)
3696 {
3697         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3698                 return true;
3699         return false;
3700 }
3701
3702 /* cleanup on driver unload */
3703 void dc_hardware_release(struct dc *dc)
3704 {
3705         if (dc->hwss.hardware_release)
3706                 dc->hwss.hardware_release(dc);
3707 }
3708 #endif
3709
3710 /**
3711  * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
3712  * @dc: dc structure
3713  *
3714  * Returns: True to enable dmub notifications, False otherwise
3715  */
3716 bool dc_enable_dmub_notifications(struct dc *dc)
3717 {
3718 #if defined(CONFIG_DRM_AMD_DC_DCN)
3719         /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
3720         if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
3721             dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
3722             !dc->debug.dpia_debug.bits.disable_dpia)
3723                 return true;
3724 #endif
3725         /* dmub aux needs dmub notifications to be enabled */
3726         return dc->debug.enable_dmub_aux_for_legacy_ddc;
3727 }
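
/*
 * Example: the DM is expected to gate its DMUB outbox interrupt registration
 * on this query. A minimal sketch; the registration helper name below is
 * illustrative, not part of this interface:
 *
 *      if (dc_enable_dmub_notifications(dc))
 *              register_dmub_outbox_irq_handlers(adev);
 */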
3728
3729 /**
3730  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
3731  *                                      Sets port index appropriately for legacy DDC
3732  * @dc: dc structure
3733  * @link_index: link index
3734  * @payload: aux payload
3735  *
3736  * Returns: True if successful, False if failure
3737  */
3738 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3739                                 uint32_t link_index,
3740                                 struct aux_payload *payload)
3741 {
3742         uint8_t action;
3743         union dmub_rb_cmd cmd = {0};
3744         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3745
3746         ASSERT(payload->length <= 16);
3747
3748         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3749         cmd.dp_aux_access.header.payload_bytes = 0;
3750         /* For dpia, ddc_pin is set to NULL */
3751         if (!dc->links[link_index]->ddc->ddc_pin)
3752                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
3753         else
3754                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3755
3756         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3757         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3758         cmd.dp_aux_access.aux_control.timeout = 0;
3759         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3760         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3761         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3762
3763         /* set aux action */
3764         if (payload->i2c_over_aux) {
3765                 if (payload->write) {
3766                         if (payload->mot)
3767                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3768                         else
3769                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
                } else {
                        if (payload->mot)
                                action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
                        else
                                action = DP_AUX_REQ_ACTION_I2C_READ;
                }
        } else {
3777                 if (payload->write)
3778                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3779                 else
3780                         action = DP_AUX_REQ_ACTION_DPCD_READ;
3781         }
3782
3783         cmd.dp_aux_access.aux_control.dpaux.action = action;
3784
3785         if (payload->length && payload->write) {
3786                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3787                         payload->data,
3788                         payload->length
3789                         );
3790         }
3791
3792         dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3793         dc_dmub_srv_cmd_execute(dmub_srv);
3794         dc_dmub_srv_wait_idle(dmub_srv);
3795
3796         return true;
3797 }
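
/*
 * Example: a minimal sketch of submitting an async native-AUX DPCD read;
 * the reply arrives later via a DMUB outbox notification, which the caller
 * is assumed to handle separately:
 *
 *      struct aux_payload payload = {
 *              .address = dpcd_addr,   // caller-supplied DPCD address
 *              .length = 1,
 *              .data = data_buf,       // caller-owned buffer
 *              .write = false,
 *              .i2c_over_aux = false,
 *      };
 *
 *      dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */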
3798
3799 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
3800                                             uint8_t dpia_port_index)
3801 {
3802         uint8_t index, link_index = 0xFF;
3803
3804         for (index = 0; index < dc->link_count; index++) {
3805                 /* ddc_hw_inst has dpia port index for dpia links
3806                  * and ddc instance for legacy links
3807                  */
3808                 if (!dc->links[index]->ddc->ddc_pin) {
3809                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
3810                                 link_index = index;
3811                                 break;
3812                         }
3813                 }
3814         }
3815         ASSERT(link_index != 0xFF);
3816         return link_index;
3817 }
3818
/**
 * dc_process_dmub_set_config_async - Submits set_config command to dmub via inbox message
 * @dc: dc structure
 * @link_index: link index
 * @payload: set_config command payload
 * @notify: set_config immediate reply
 *
 * Returns: True if successful, False if failure
 */
3836 bool dc_process_dmub_set_config_async(struct dc *dc,
3837                                 uint32_t link_index,
3838                                 struct set_config_cmd_payload *payload,
3839                                 struct dmub_notification *notify)
3840 {
3841         union dmub_rb_cmd cmd = {0};
3842         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3843         bool is_cmd_complete = true;
3844
3845         /* prepare SET_CONFIG command */
3846         cmd.set_config_access.header.type = DMUB_CMD__DPIA;
3847         cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
3848
3849         cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
3850         cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
3851         cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
3852
3853         if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
3854                 /* command is not processed by dmub */
3855                 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
3856                 return is_cmd_complete;
3857         }
3858
3859         /* command processed by dmub, if ret_status is 1, it is completed instantly */
3860         if (cmd.set_config_access.header.ret_status == 1)
3861                 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
3862         else
3863                 /* cmd pending, will receive notification via outbox */
3864                 is_cmd_complete = false;
3865
3866         return is_cmd_complete;
3867 }
3868
/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation command to dmub via inbox message
 * @dc: dc structure
 * @link_index: link index
 * @mst_alloc_slots: mst slots to be allotted
 * @mst_slots_in_use: mst slots in use returned in failure case
 *
 * Returns: DC_OK if successful, DC_ERROR if failure
 */
3886 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
3887                                 uint32_t link_index,
3888                                 uint8_t mst_alloc_slots,
3889                                 uint8_t *mst_slots_in_use)
3890 {
3891         union dmub_rb_cmd cmd = {0};
3892         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3893
3894         /* prepare MST_ALLOC_SLOTS command */
3895         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
3896         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
3897
3898         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
3899         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
3900
3901         if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
3902                 /* command is not processed by dmub */
3903                 return DC_ERROR_UNEXPECTED;
3904
        /* command processed by dmub, if ret_status is 1 */
        if (cmd.set_mst_alloc_slots.header.ret_status != 1)
3907                 /* command processing error */
3908                 return DC_ERROR_UNEXPECTED;
3909
3910         /* command processed and we have a status of 2, mst not enabled in dpia */
3911         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
3912                 return DC_FAIL_UNSUPPORTED_1;
3913
3914         /* previously configured mst alloc and used slots did not match */
3915         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
3916                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
3917                 return DC_NOT_SUPPORTED;
3918         }
3919
3920         return DC_OK;
3921 }
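
/*
 * Example: a minimal sketch of requesting an MST slot allocation and
 * handling the mismatch case documented above:
 *
 *      uint8_t slots_in_use;
 *      enum dc_status status = dc_process_dmub_set_mst_slots(dc, link_index,
 *                      mst_alloc_slots, &slots_in_use);
 *
 *      if (status == DC_NOT_SUPPORTED) {
 *              // the previously programmed allocation (slots_in_use)
 *              // disagrees with the request; the caller reconciles
 *      }
 */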
3922
3923 /**
3924  * dc_disable_accelerated_mode - disable accelerated mode
3925  * @dc: dc structure
3926  */
3927 void dc_disable_accelerated_mode(struct dc *dc)
3928 {
3929         bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
3930 }
3931
/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop
 * ABM interrupts after steady state is reached.
 */
3945 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
3946 {
3947         int i;
3948         int edp_num;
3949         struct pipe_ctx *pipe = NULL;
3950         struct dc_link *link = stream->sink->link;
3951         struct dc_link *edp_links[MAX_NUM_EDP];
3952
3954         if (link->psr_settings.psr_feature_enabled)
3955                 return;
3956
        /* find primary pipe associated with stream */
3958         for (i = 0; i < MAX_PIPES; i++) {
3959                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3960
3961                 if (pipe->stream == stream && pipe->stream_res.tg)
3962                         break;
3963         }
3964
3965         if (i == MAX_PIPES) {
3966                 ASSERT(0);
3967                 return;
3968         }
3969
3970         get_edp_links(dc, edp_links, &edp_num);
3971
3972         /* Determine panel inst */
3973         for (i = 0; i < edp_num; i++) {
3974                 if (edp_links[i] == link)
3975                         break;
3976         }
3977
        if (i == edp_num)
                return;
3981
3982         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
3983                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
3984 }