Merge branches 'clk-range', 'clk-uniphier', 'clk-apple' and 'clk-qcom' into clk-next
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / dc / core / dc.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24
25 #include <linux/slab.h>
26 #include <linux/mm.h>
27
28 #include "dm_services.h"
29
30 #include "dc.h"
31
32 #include "core_status.h"
33 #include "core_types.h"
34 #include "hw_sequencer.h"
35 #include "dce/dce_hwseq.h"
36
37 #include "resource.h"
38
39 #include "clk_mgr.h"
40 #include "clock_source.h"
41 #include "dc_bios_types.h"
42
43 #include "bios_parser_interface.h"
44 #include "bios/bios_parser_helper.h"
45 #include "include/irq_service_interface.h"
46 #include "transform.h"
47 #include "dmcu.h"
48 #include "dpp.h"
49 #include "timing_generator.h"
50 #include "abm.h"
51 #include "virtual/virtual_link_encoder.h"
52 #include "hubp.h"
53
54 #include "link_hwss.h"
55 #include "link_encoder.h"
56 #include "link_enc_cfg.h"
57
58 #include "dc_link.h"
59 #include "dc_link_ddc.h"
60 #include "dm_helpers.h"
61 #include "mem_input.h"
62
63 #include "dc_link_dp.h"
64 #include "dc_dmub_srv.h"
65
66 #include "dsc.h"
67
68 #include "vm_helper.h"
69
70 #include "dce/dce_i2c.h"
71
72 #include "dmub/dmub_srv.h"
73
74 #include "i2caux_interface.h"
75 #include "dce/dmub_hw_lock_mgr.h"
76
77 #include "dc_trace.h"
78
/* Shorthands consumed by the dm_services / dc_trace logging macros:
 * CTX resolves to this file's dc_context, DC_LOGGER to its logger.
 */
#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

/* Build tag reported for this DC library instance. */
static const char DC_BUILD_ID[] = "production-build";
86
87 /**
88  * DOC: Overview
89  *
90  * DC is the OS-agnostic component of the amdgpu DC driver.
91  *
92  * DC maintains and validates a set of structs representing the state of the
93  * driver and writes that state to AMD hardware
94  *
95  * Main DC HW structs:
96  *
97  * struct dc - The central struct.  One per driver.  Created on driver load,
98  * destroyed on driver unload.
99  *
100  * struct dc_context - One per driver.
101  * Used as a backpointer by most other structs in dc.
102  *
103  * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
104  * plugpoints).  Created on driver load, destroyed on driver unload.
105  *
106  * struct dc_sink - One per display.  Created on boot or hotplug.
107  * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
108  * (the display directly attached).  It may also have one or more remote
109  * sinks (in the Multi-Stream Transport case)
110  *
111  * struct resource_pool - One per driver.  Represents the hw blocks not in the
112  * main pipeline.  Not directly accessible by dm.
113  *
114  * Main dc state structs:
115  *
116  * These structs can be created and destroyed as needed.  There is a full set of
117  * these structs in dc->current_state representing the currently programmed state.
118  *
119  * struct dc_state - The global DC state to track global state information,
120  * such as bandwidth values.
121  *
122  * struct dc_stream_state - Represents the hw configuration for the pipeline from
123  * a framebuffer to a display.  Maps one-to-one with dc_sink.
124  *
125  * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
126  * and may have more in the Multi-Plane Overlay case.
127  *
128  * struct resource_context - Represents the programmable state of everything in
129  * the resource_pool.  Not directly accessible by dm.
130  *
131  * struct pipe_ctx - A member of struct resource_context.  Represents the
132  * internal hardware pipeline components.  Each dc_plane_state has either
133  * one or two (in the pipe-split case).
134  */
135
136 /*******************************************************************************
137  * Private functions
138  ******************************************************************************/
139
140 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
141 {
142         if (new > *original)
143                 *original = new;
144 }
145
146 static void destroy_links(struct dc *dc)
147 {
148         uint32_t i;
149
150         for (i = 0; i < dc->link_count; i++) {
151                 if (NULL != dc->links[i])
152                         link_destroy(&dc->links[i]);
153         }
154 }
155
156 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
157 {
158         int i;
159         uint32_t count = 0;
160
161         for (i = 0; i < num_links; i++) {
162                 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
163                                 links[i]->is_internal_display)
164                         count++;
165         }
166
167         return count;
168 }
169
170 static int get_seamless_boot_stream_count(struct dc_state *ctx)
171 {
172         uint8_t i;
173         uint8_t seamless_boot_stream_count = 0;
174
175         for (i = 0; i < ctx->stream_count; i++)
176                 if (ctx->streams[i]->apply_seamless_boot_optimization)
177                         seamless_boot_stream_count++;
178
179         return seamless_boot_stream_count;
180 }
181
/* Populate dc->links from three sources, in order:
 *   1) one link per physical connector reported by the BIOS object table,
 *   2) one link per USB4 DPIA port advertised by the resource pool,
 *   3) num_virtual_links purely virtual links (virtual signal + encoder).
 * Also derives dc->caps.num_of_internal_disp from the final link list.
 * Returns false on allocation failure; links created before the failure are
 * left in dc->links so destroy_links() can reclaim them later.
 */
static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	/* ENUM_ID_COUNT bounds how many connector objects we can index. */
	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	/* Phase 1: links for physical connectors from the BIOS object table.
	 * link_create() may legitimately fail for unusable connectors, in
	 * which case that connector is simply skipped.
	 */
	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = link_create(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	/* Phase 3: virtual links are allocated directly here (not via
	 * link_create) and paired with a virtual link encoder.
	 */
	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		/* link was already published in dc->links above, so if this
		 * allocation fails the link itself is reclaimed by
		 * destroy_links() during teardown.
		 */
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}
297
298 /* Create additional DIG link encoder objects if fewer than the platform
299  * supports were created during link construction. This can happen if the
300  * number of physical connectors is less than the number of DIGs.
301  */
302 static bool create_link_encoders(struct dc *dc)
303 {
304         bool res = true;
305         unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
306         unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
307         int i;
308
309         /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
310          * link encoders and physical display endpoints and does not require
311          * additional link encoder objects.
312          */
313         if (num_usb4_dpia == 0)
314                 return res;
315
316         /* Create as many link encoder objects as the platform supports. DPIA
317          * endpoints can be programmably mapped to any DIG.
318          */
319         if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
320                 for (i = 0; i < num_dig_link_enc; i++) {
321                         struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
322
323                         if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
324                                 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
325                                                 (enum engine_id)(ENGINE_ID_DIGA + i));
326                                 if (link_enc) {
327                                         dc->res_pool->link_encoders[i] = link_enc;
328                                         dc->res_pool->dig_link_enc_count++;
329                                 } else {
330                                         res = false;
331                                 }
332                         }
333                 }
334         }
335
336         return res;
337 }
338
339 /* Destroy any additional DIG link encoder objects created by
340  * create_link_encoders().
341  * NB: Must only be called after destroy_links().
342  */
343 static void destroy_link_encoders(struct dc *dc)
344 {
345         unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
346         unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
347         int i;
348
349         /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
350          * link encoders and physical display endpoints and does not require
351          * additional link encoder objects.
352          */
353         if (num_usb4_dpia == 0)
354                 return;
355
356         for (i = 0; i < num_dig_link_enc; i++) {
357                 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
358
359                 if (link_enc) {
360                         link_enc->funcs->destroy(&link_enc);
361                         dc->res_pool->link_encoders[i] = NULL;
362                         dc->res_pool->dig_link_enc_count--;
363                 }
364         }
365 }
366
367 static struct dc_perf_trace *dc_perf_trace_create(void)
368 {
369         return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
370 }
371
372 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
373 {
374         kfree(*perf_trace);
375         *perf_trace = NULL;
376 }
377
378 /**
379  *  dc_stream_adjust_vmin_vmax:
380  *
381  *  Looks up the pipe context of dc_stream_state and updates the
382  *  vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
383  *  Rate, which is a power-saving feature that targets reducing panel
384  *  refresh rate while the screen is static
385  *
386  *  @dc:     dc reference
387  *  @stream: Initial dc stream state
388  *  @adjust: Updated parameters for vertical_total_min and vertical_total_max
389  */
390 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
391                 struct dc_stream_state *stream,
392                 struct dc_crtc_timing_adjust *adjust)
393 {
394         int i;
395         bool ret = false;
396
397         stream->adjust.v_total_max = adjust->v_total_max;
398         stream->adjust.v_total_mid = adjust->v_total_mid;
399         stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
400         stream->adjust.v_total_min = adjust->v_total_min;
401
402         for (i = 0; i < MAX_PIPES; i++) {
403                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
404
405                 if (pipe->stream == stream && pipe->stream_res.tg) {
406                         dc->hwss.set_drr(&pipe,
407                                         1,
408                                         *adjust);
409
410                         ret = true;
411                 }
412         }
413         return ret;
414 }
415
416 /**
417  *****************************************************************************
418  *  Function: dc_stream_get_last_vrr_vtotal
419  *
420  *  @brief
421  *     Looks up the pipe context of dc_stream_state and gets the
422  *     last VTOTAL used by DRR (Dynamic Refresh Rate)
423  *
424  *  @param [in] dc: dc reference
425  *  @param [in] stream: Initial dc stream state
426  *  @param [in] adjust: Updated parameters for vertical_total_min and
427  *  vertical_total_max
428  *****************************************************************************
429  */
430 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
431                 struct dc_stream_state *stream,
432                 uint32_t *refresh_rate)
433 {
434         bool status = false;
435
436         int i = 0;
437
438         for (i = 0; i < MAX_PIPES; i++) {
439                 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
440
441                 if (pipe->stream == stream && pipe->stream_res.tg) {
442                         /* Only execute if a function pointer has been defined for
443                          * the DC version in question
444                          */
445                         if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
446                                 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
447
448                                 status = true;
449
450                                 break;
451                         }
452                 }
453         }
454
455         return status;
456 }
457
458 bool dc_stream_get_crtc_position(struct dc *dc,
459                 struct dc_stream_state **streams, int num_streams,
460                 unsigned int *v_pos, unsigned int *nom_v_pos)
461 {
462         /* TODO: Support multiple streams */
463         const struct dc_stream_state *stream = streams[0];
464         int i;
465         bool ret = false;
466         struct crtc_position position;
467
468         for (i = 0; i < MAX_PIPES; i++) {
469                 struct pipe_ctx *pipe =
470                                 &dc->current_state->res_ctx.pipe_ctx[i];
471
472                 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
473                         dc->hwss.get_position(&pipe, 1, &position);
474
475                         *v_pos = position.vertical_count;
476                         *nom_v_pos = position.nominal_vcount;
477                         ret = true;
478                 }
479         }
480         return ret;
481 }
482
483 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
484 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
485                              struct crc_params *crc_window)
486 {
487         int i;
488         struct dmcu *dmcu = dc->res_pool->dmcu;
489         struct pipe_ctx *pipe;
490         struct crc_region tmp_win, *crc_win;
491         struct otg_phy_mux mapping_tmp, *mux_mapping;
492
493         /*crc window can't be null*/
494         if (!crc_window)
495                 return false;
496
497         if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
498                 crc_win = &tmp_win;
499                 mux_mapping = &mapping_tmp;
500                 /*set crc window*/
501                 tmp_win.x_start = crc_window->windowa_x_start;
502                 tmp_win.y_start = crc_window->windowa_y_start;
503                 tmp_win.x_end = crc_window->windowa_x_end;
504                 tmp_win.y_end = crc_window->windowa_y_end;
505
506                 for (i = 0; i < MAX_PIPES; i++) {
507                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
508                         if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
509                                 break;
510                 }
511
512                 /* Stream not found */
513                 if (i == MAX_PIPES)
514                         return false;
515
516
517                 /*set mux routing info*/
518                 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
519                 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
520
521                 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
522         } else {
523                 DC_LOG_DC("dmcu is not initialized");
524                 return false;
525         }
526
527         return true;
528 }
529
530 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
531 {
532         int i;
533         struct dmcu *dmcu = dc->res_pool->dmcu;
534         struct pipe_ctx *pipe;
535         struct otg_phy_mux mapping_tmp, *mux_mapping;
536
537         if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
538                 mux_mapping = &mapping_tmp;
539
540                 for (i = 0; i < MAX_PIPES; i++) {
541                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
542                         if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
543                                 break;
544                 }
545
546                 /* Stream not found */
547                 if (i == MAX_PIPES)
548                         return false;
549
550
551                 /*set mux routing info*/
552                 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
553                 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
554
555                 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
556         } else {
557                 DC_LOG_DC("dmcu is not initialized");
558                 return false;
559         }
560
561         return true;
562 }
563 #endif
564
565 /**
566  * dc_stream_configure_crc() - Configure CRC capture for the given stream.
567  * @dc: DC Object
568  * @stream: The stream to configure CRC on.
569  * @enable: Enable CRC if true, disable otherwise.
570  * @crc_window: CRC window (x/y start/end) information
571  * @continuous: Capture CRC on every frame if true. Otherwise, only capture
572  *              once.
573  *
574  * By default, only CRC0 is configured, and the entire frame is used to
575  * calculate the crc.
576  */
577 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
578                              struct crc_params *crc_window, bool enable, bool continuous)
579 {
580         int i;
581         struct pipe_ctx *pipe;
582         struct crc_params param;
583         struct timing_generator *tg;
584
585         for (i = 0; i < MAX_PIPES; i++) {
586                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
587                 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
588                         break;
589         }
590         /* Stream not found */
591         if (i == MAX_PIPES)
592                 return false;
593
594         /* By default, capture the full frame */
595         param.windowa_x_start = 0;
596         param.windowa_y_start = 0;
597         param.windowa_x_end = pipe->stream->timing.h_addressable;
598         param.windowa_y_end = pipe->stream->timing.v_addressable;
599         param.windowb_x_start = 0;
600         param.windowb_y_start = 0;
601         param.windowb_x_end = pipe->stream->timing.h_addressable;
602         param.windowb_y_end = pipe->stream->timing.v_addressable;
603
604         if (crc_window) {
605                 param.windowa_x_start = crc_window->windowa_x_start;
606                 param.windowa_y_start = crc_window->windowa_y_start;
607                 param.windowa_x_end = crc_window->windowa_x_end;
608                 param.windowa_y_end = crc_window->windowa_y_end;
609                 param.windowb_x_start = crc_window->windowb_x_start;
610                 param.windowb_y_start = crc_window->windowb_y_start;
611                 param.windowb_x_end = crc_window->windowb_x_end;
612                 param.windowb_y_end = crc_window->windowb_y_end;
613         }
614
615         param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
616         param.odm_mode = pipe->next_odm_pipe ? 1:0;
617
618         /* Default to the union of both windows */
619         param.selection = UNION_WINDOW_A_B;
620         param.continuous_mode = continuous;
621         param.enable = enable;
622
623         tg = pipe->stream_res.tg;
624
625         /* Only call if supported */
626         if (tg->funcs->configure_crc)
627                 return tg->funcs->configure_crc(tg, &param);
628         DC_LOG_WARNING("CRC capture not supported.");
629         return false;
630 }
631
632 /**
633  * dc_stream_get_crc() - Get CRC values for the given stream.
634  * @dc: DC object
635  * @stream: The DC stream state of the stream to get CRCs from.
636  * @r_cr: CRC value for the first of the 3 channels stored here.
637  * @g_y:  CRC value for the second of the 3 channels stored here.
638  * @b_cb: CRC value for the third of the 3 channels stored here.
639  *
640  * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
641  * Return false if stream is not found, or if CRCs are not enabled.
642  */
643 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
644                        uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
645 {
646         int i;
647         struct pipe_ctx *pipe;
648         struct timing_generator *tg;
649
650         for (i = 0; i < MAX_PIPES; i++) {
651                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
652                 if (pipe->stream == stream)
653                         break;
654         }
655         /* Stream not found */
656         if (i == MAX_PIPES)
657                 return false;
658
659         tg = pipe->stream_res.tg;
660
661         if (tg->funcs->get_crc)
662                 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
663         DC_LOG_WARNING("CRC capture not supported.");
664         return false;
665 }
666
667 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
668                 enum dc_dynamic_expansion option)
669 {
670         /* OPP FMT dyn expansion updates*/
671         int i;
672         struct pipe_ctx *pipe_ctx;
673
674         for (i = 0; i < MAX_PIPES; i++) {
675                 if (dc->current_state->res_ctx.pipe_ctx[i].stream
676                                 == stream) {
677                         pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
678                         pipe_ctx->stream_res.opp->dyn_expansion = option;
679                         pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
680                                         pipe_ctx->stream_res.opp,
681                                         COLOR_SPACE_YCBCR601,
682                                         stream->timing.display_color_depth,
683                                         stream->signal);
684                 }
685         }
686 }
687
688 void dc_stream_set_dither_option(struct dc_stream_state *stream,
689                 enum dc_dither_option option)
690 {
691         struct bit_depth_reduction_params params;
692         struct dc_link *link = stream->link;
693         struct pipe_ctx *pipes = NULL;
694         int i;
695
696         for (i = 0; i < MAX_PIPES; i++) {
697                 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
698                                 stream) {
699                         pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
700                         break;
701                 }
702         }
703
704         if (!pipes)
705                 return;
706         if (option > DITHER_OPTION_MAX)
707                 return;
708
709         stream->dither_option = option;
710
711         memset(&params, 0, sizeof(params));
712         resource_build_bit_depth_reduction_params(stream, &params);
713         stream->bit_depth_params = params;
714
715         if (pipes->plane_res.xfm &&
716             pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
717                 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
718                         pipes->plane_res.xfm,
719                         pipes->plane_res.scl_data.lb_params.depth,
720                         &stream->bit_depth_params);
721         }
722
723         pipes->stream_res.opp->funcs->
724                 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
725 }
726
727 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
728 {
729         int i;
730         bool ret = false;
731         struct pipe_ctx *pipes;
732
733         for (i = 0; i < MAX_PIPES; i++) {
734                 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
735                         pipes = &dc->current_state->res_ctx.pipe_ctx[i];
736                         dc->hwss.program_gamut_remap(pipes);
737                         ret = true;
738                 }
739         }
740
741         return ret;
742 }
743
744 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
745 {
746         int i;
747         bool ret = false;
748         struct pipe_ctx *pipes;
749
750         for (i = 0; i < MAX_PIPES; i++) {
751                 if (dc->current_state->res_ctx.pipe_ctx[i].stream
752                                 == stream) {
753
754                         pipes = &dc->current_state->res_ctx.pipe_ctx[i];
755                         dc->hwss.program_output_csc(dc,
756                                         pipes,
757                                         stream->output_color_space,
758                                         stream->csc_color_matrix.matrix,
759                                         pipes->stream_res.opp->inst);
760                         ret = true;
761                 }
762         }
763
764         return ret;
765 }
766
767 void dc_stream_set_static_screen_params(struct dc *dc,
768                 struct dc_stream_state **streams,
769                 int num_streams,
770                 const struct dc_static_screen_params *params)
771 {
772         int i, j;
773         struct pipe_ctx *pipes_affected[MAX_PIPES];
774         int num_pipes_affected = 0;
775
776         for (i = 0; i < num_streams; i++) {
777                 struct dc_stream_state *stream = streams[i];
778
779                 for (j = 0; j < MAX_PIPES; j++) {
780                         if (dc->current_state->res_ctx.pipe_ctx[j].stream
781                                         == stream) {
782                                 pipes_affected[num_pipes_affected++] =
783                                                 &dc->current_state->res_ctx.pipe_ctx[j];
784                         }
785                 }
786         }
787
788         dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
789 }
790
/* Tear down a dc instance in reverse construction order: release the current
 * state, destroy links then the extra link encoders (ordering required by
 * destroy_link_encoders()), the clock manager, the resource pool, context
 * services, and finally the bandwidth-calculation and VM helper allocations.
 */
static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	/* NB: must run after destroy_links() (see destroy_link_encoders). */
	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	/* Only destroy the BIOS parser when this dc created it itself. */
	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
	kfree(dc->vm_helper);
	dc->vm_helper = NULL;

}
842
843 static bool dc_construct_ctx(struct dc *dc,
844                 const struct dc_init_data *init_params)
845 {
846         struct dc_context *dc_ctx;
847         enum dce_version dc_version = DCE_VERSION_UNKNOWN;
848
849         dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
850         if (!dc_ctx)
851                 return false;
852
853         dc_ctx->cgs_device = init_params->cgs_device;
854         dc_ctx->driver_context = init_params->driver;
855         dc_ctx->dc = dc;
856         dc_ctx->asic_id = init_params->asic_id;
857         dc_ctx->dc_sink_id_count = 0;
858         dc_ctx->dc_stream_id_count = 0;
859         dc_ctx->dce_environment = init_params->dce_environment;
860
861         /* Create logger */
862
863         dc_version = resource_parse_asic_id(init_params->asic_id);
864         dc_ctx->dce_version = dc_version;
865
866         dc_ctx->perf_trace = dc_perf_trace_create();
867         if (!dc_ctx->perf_trace) {
868                 ASSERT_CRITICAL(false);
869                 return false;
870         }
871
872         dc->ctx = dc_ctx;
873
874         return true;
875 }
876
/* Full construction path for real (non-virtual) hardware: allocates the
 * bandwidth-calculation structures, builds the dc_context, BIOS parser,
 * GPIO service, resource pool, clock manager, links and the initial
 * current_state.
 *
 * Returns true on success. On failure it returns false and relies on
 * the caller (dc_create()) invoking dc_destruct() to release whatever
 * was partially constructed.
 */
static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	/* Bandwidth-formula parameter blocks (DCE-style bw calcs) */
	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN
	/* DCN-only SoC bounding box and IP parameters */
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		/* Remember we own the parser so dc_destruct() destroys it */
		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
#endif

	/* Let the asic-specific resource code refine the bw bounding box
	 * now that the clock manager knows the real clock tables.
	 */
	if (dc->res_pool->funcs->update_bw_bounding_box)
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	/* Partial allocations are released by dc_destruct() in the caller */
	return false;
}
1021
1022 static void disable_all_writeback_pipes_for_stream(
1023                 const struct dc *dc,
1024                 struct dc_stream_state *stream,
1025                 struct dc_state *context)
1026 {
1027         int i;
1028
1029         for (i = 0; i < stream->num_wb_info; i++)
1030                 stream->writeback_info[i].wb_enabled = false;
1031 }
1032
1033 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
1034                                           struct dc_stream_state *stream, bool lock)
1035 {
1036         int i;
1037
1038         /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
1039         if (dc->hwss.interdependent_update_lock)
1040                 dc->hwss.interdependent_update_lock(dc, context, lock);
1041         else {
1042                 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1043                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1044                         struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1045
1046                         // Copied conditions that were previously in dce110_apply_ctx_for_surface
1047                         if (stream == pipe_ctx->stream) {
1048                                 if (!pipe_ctx->top_pipe &&
1049                                         (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1050                                         dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1051                         }
1052                 }
1053         }
1054 }
1055
/* Disable any pipe whose current-state stream is absent from (or split
 * differently in) the incoming @context, so no plane keeps scanning out
 * after its stream goes away.
 *
 * Works on a temporary copy of the current state ("dangling_context");
 * on return that copy becomes dc->current_state and the old state is
 * released.
 */
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		/* top_pipe linkage differing between states means the
		 * pipe-split topology changed for this pipe
		 */
		bool pipe_split_change =
			context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		/* keep the pipe if its stream survives into the new context */
		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change)
			should_disable = true;

		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			/* DCE-style sequencer: per-surface programming under lock */
			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			/* DCN-style sequencer: whole front end for the context */
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	/* swap: the pruned copy becomes the current state */
	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}
1106
/* If VBIOS/GOP left an eDP stream running at a pixel clock different
 * from what the new @context requests, disable that stream now so it
 * can be re-enabled with the correct timing instead of inheriting the
 * wrong mode.
 */
static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if timing_changed, disable stream*/
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		/* only eDP local sinks are candidates for this check */
		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				/* map the DIG front end to the OTG feeding it.
				 * NOTE(review): tg_inst stays 0 if no stream
				 * encoder matches enc_inst — presumably that
				 * cannot happen for an enabled DIG; confirm.
				 */
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					/* mismatch: kill the VBIOS-enabled stream */
					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						/* allow the later enable to run */
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}
1164
1165 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1166 {
1167         int i;
1168         PERF_TRACE();
1169         for (i = 0; i < MAX_PIPES; i++) {
1170                 int count = 0;
1171                 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1172
1173                 if (!pipe->plane_state)
1174                         continue;
1175
1176                 /* Timeout 100 ms */
1177                 while (count < 100000) {
1178                         /* Must set to false to start with, due to OR in update function */
1179                         pipe->plane_state->status.is_flip_pending = false;
1180                         dc->hwss.update_pending_status(pipe);
1181                         if (!pipe->plane_state->status.is_flip_pending)
1182                                 break;
1183                         udelay(1);
1184                         count++;
1185                 }
1186                 ASSERT(!pipe->plane_state->status.is_flip_pending);
1187         }
1188         PERF_TRACE();
1189 }
1190
1191 /*******************************************************************************
1192  * Public functions
1193  ******************************************************************************/
1194
/* Allocate and construct the display core.
 *
 * For DCE_ENV_VIRTUAL_HW only the lightweight context is built; real
 * hardware goes through the full dc_construct() path and then derives
 * the capability caps from the constructed resource pool.
 *
 * Returns the new dc on success, NULL on any construction failure
 * (partial construction is unwound via dc_destruct()).
 */
struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		/* the underlay pipe (DCE110) cannot host a full stream */
		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");



	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}
1245
1246 static void detect_edp_presence(struct dc *dc)
1247 {
1248         struct dc_link *edp_links[MAX_NUM_EDP];
1249         struct dc_link *edp_link = NULL;
1250         enum dc_connection_type type;
1251         int i;
1252         int edp_num;
1253
1254         get_edp_links(dc, edp_links, &edp_num);
1255         if (!edp_num)
1256                 return;
1257
1258         for (i = 0; i < edp_num; i++) {
1259                 edp_link = edp_links[i];
1260                 if (dc->config.edp_not_connected) {
1261                         edp_link->edp_sink_present = false;
1262                 } else {
1263                         dc_link_detect_sink(edp_link, &type);
1264                         edp_link->edp_sink_present = (type != dc_connection_none);
1265                 }
1266         }
1267 }
1268
1269 void dc_hardware_init(struct dc *dc)
1270 {
1271
1272         detect_edp_presence(dc);
1273         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1274                 dc->hwss.init_hw(dc);
1275 }
1276
/* Store DM-provided callbacks on the dc context.
 * Currently only the content-protection PSP hooks (HDCP) are kept.
 */
void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}
1284
/* Clear the DM-provided callbacks registered by dc_init_callbacks(). */
void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}
1291
1292 void dc_destroy(struct dc **dc)
1293 {
1294         dc_destruct(*dc);
1295         kfree(*dc);
1296         *dc = NULL;
1297 }
1298
1299 static void enable_timing_multisync(
1300                 struct dc *dc,
1301                 struct dc_state *ctx)
1302 {
1303         int i, multisync_count = 0;
1304         int pipe_count = dc->res_pool->pipe_count;
1305         struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1306
1307         for (i = 0; i < pipe_count; i++) {
1308                 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1309                                 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1310                         continue;
1311                 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1312                         continue;
1313                 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1314                 multisync_count++;
1315         }
1316
1317         if (multisync_count > 0) {
1318                 dc->hwss.enable_per_frame_crtc_position_reset(
1319                         dc, multisync_count, multisync_pipes);
1320         }
1321 }
1322
/* Group pipes whose stream timings can be synchronized (full timing
 * sync or vblank-only sync), pick an already-unblanked pipe as each
 * group's master, drop members that need no programming, then hand
 * every multi-pipe group to the hw sequencer.
 */
static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	/* only top pipes carrying a stream are sync candidates */
	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search rest of the tg's for ones with
		 * same timing, add all tgs with same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			/* a group uses one sync type, never a mix: prefer
			 * vblank sync when the tg can align vblanks,
			 * otherwise full timing sync
			 */
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		/* publish group membership into each stream's status */
		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;

		}

		/* remove any other pipes that are already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* j deliberately carries over from the master-selection
			 * loop: entries at indices <= j are known blanked (or
			 * j == group_size when every pipe was blanked), so only
			 * later entries can still be unblanked/already-synced.
			 */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else
				if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
				}
			group_index++;
		}
		num_group++;
	}
}
1454
1455 static bool context_changed(
1456                 struct dc *dc,
1457                 struct dc_state *context)
1458 {
1459         uint8_t i;
1460
1461         if (context->stream_count != dc->current_state->stream_count)
1462                 return true;
1463
1464         for (i = 0; i < dc->current_state->stream_count; i++) {
1465                 if (dc->current_state->streams[i] != context->streams[i])
1466                         return true;
1467         }
1468
1469         return false;
1470 }
1471
/* Decide whether the timing left programmed by VBIOS/GOP on @sink
 * matches @crtc_timing exactly, so the boot display can be adopted
 * without retraining/re-enabling (seamless boot).
 *
 * Returns true only when the enabled DIG, its source OTG's hw timing,
 * the pixel clock and the DP pixel format all match and no blocking
 * feature (DSC, VSC SDP colorimetry, eDP ILR optimization) applies.
 */
bool dc_validate_seamless_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on EDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP) {
		return false;
	}

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	/* map the DIG front end to its stream encoder and source OTG */
	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	/* every field of the requested timing must equal what the hw is
	 * already scanning out
	 */
	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	/* VSC SDP colorimetry would need reprogramming — not seamless */
	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
		return false;
	}

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
1601
1602 static inline bool should_update_pipe_for_stream(
1603                 struct dc_state *context,
1604                 struct pipe_ctx *pipe_ctx,
1605                 struct dc_stream_state *stream)
1606 {
1607         return (pipe_ctx->stream && pipe_ctx->stream == stream);
1608 }
1609
1610 static inline bool should_update_pipe_for_plane(
1611                 struct dc_state *context,
1612                 struct pipe_ctx *pipe_ctx,
1613                 struct dc_plane_state *plane_state)
1614 {
1615         return (pipe_ctx->plane_state == plane_state);
1616 }
1617
1618 void dc_enable_stereo(
1619         struct dc *dc,
1620         struct dc_state *context,
1621         struct dc_stream_state *streams[],
1622         uint8_t stream_count)
1623 {
1624         int i, j;
1625         struct pipe_ctx *pipe;
1626
1627         for (i = 0; i < MAX_PIPES; i++) {
1628                 if (context != NULL) {
1629                         pipe = &context->res_ctx.pipe_ctx[i];
1630                 } else {
1631                         context = dc->current_state;
1632                         pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1633                 }
1634
1635                 for (j = 0; pipe && j < stream_count; j++)  {
1636                         if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1637                                 dc->hwss.setup_stereo)
1638                                 dc->hwss.setup_stereo(pipe, dc);
1639                 }
1640         }
1641 }
1642
1643 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1644 {
1645         if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1646                 enable_timing_multisync(dc, context);
1647                 program_timing_sync(dc, context);
1648         }
1649 }
1650
1651 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1652 {
1653         int i;
1654         unsigned int stream_mask = 0;
1655
1656         for (i = 0; i < dc->res_pool->pipe_count; i++) {
1657                 if (context->res_ctx.pipe_ctx[i].stream)
1658                         stream_mask |= 1 << i;
1659         }
1660
1661         return stream_mask;
1662 }
1663
1664 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Invoke the hwseq Z10-restore hook, if this ASIC's sequencer provides one. */
void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}
1670
/* Invoke the hwseq Z10 save-init hook, if this ASIC's sequencer provides one. */
void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
1676 #endif
1677 /*
1678  * Applies given context to HW and copy it into current context.
1679  * It's up to the user to release the src context afterwards.
1680  */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Leave Z10 and disable idle optimizations before touching HW. */
	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);
#endif

	/* Snapshot the stream list for the dc_enable_stereo() call below. */
	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] =  context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		/* Still in VBIOS mode: tear down VBIOS timings (if needed)
		 * and switch the ASIC into driver (accelerated) mode.
		 */
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	/* Raise clocks ahead of programming unless every active stream is a
	 * seamless-boot stream that must not be disturbed.
	 */
	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	dc_trigger_sync(dc, context);

	/* Program all planes within new context*/
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	/* Per-stream follow-up for streams whose mode actually changed. */
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0 ; pipe && l < context->stream_count; l++)  {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	/* Notify DMUB firmware when the set of pipes carrying streams changed. */
	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	/* Swap the new context in as current: drop the old state's reference
	 * and take one on the new state (caller still owns its own ref).
	 */
	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	return result;
}
1814
1815 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1816 {
1817         enum dc_status result = DC_ERROR_UNEXPECTED;
1818         int i;
1819
1820         if (!context_changed(dc, context))
1821                 return DC_OK;
1822
1823         DC_LOG_DC("%s: %d streams\n",
1824                                 __func__, context->stream_count);
1825
1826         for (i = 0; i < context->stream_count; i++) {
1827                 struct dc_stream_state *stream = context->streams[i];
1828
1829                 dc_stream_log(dc, stream);
1830         }
1831
1832         /*
1833          * Previous validation was perfomred with fast_validation = true and
1834          * the full DML state required for hardware programming was skipped.
1835          *
1836          * Re-validate here to calculate these parameters / watermarks.
1837          */
1838         result = dc_validate_global_state(dc, context, false);
1839         if (result != DC_OK) {
1840                 DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
1841                              dc_status_to_str(result), result);
1842                 return result;
1843         }
1844
1845         result = dc_commit_state_no_check(dc, context);
1846
1847         return (result == DC_OK);
1848 }
1849
1850 #if defined(CONFIG_DRM_AMD_DC_DCN)
1851 bool dc_acquire_release_mpc_3dlut(
1852                 struct dc *dc, bool acquire,
1853                 struct dc_stream_state *stream,
1854                 struct dc_3dlut **lut,
1855                 struct dc_transfer_func **shaper)
1856 {
1857         int pipe_idx;
1858         bool ret = false;
1859         bool found_pipe_idx = false;
1860         const struct resource_pool *pool = dc->res_pool;
1861         struct resource_context *res_ctx = &dc->current_state->res_ctx;
1862         int mpcc_id = 0;
1863
1864         if (pool && res_ctx) {
1865                 if (acquire) {
1866                         /*find pipe idx for the given stream*/
1867                         for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1868                                 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1869                                         found_pipe_idx = true;
1870                                         mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1871                                         break;
1872                                 }
1873                         }
1874                 } else
1875                         found_pipe_idx = true;/*for release pipe_idx is not required*/
1876
1877                 if (found_pipe_idx) {
1878                         if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1879                                 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1880                         else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1881                                 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1882                 }
1883         }
1884         return ret;
1885 }
1886 #endif
1887 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1888 {
1889         int i;
1890         struct pipe_ctx *pipe;
1891
1892         for (i = 0; i < MAX_PIPES; i++) {
1893                 pipe = &context->res_ctx.pipe_ctx[i];
1894
1895                 if (!pipe->plane_state)
1896                         continue;
1897
1898                 /* Must set to false to start with, due to OR in update function */
1899                 pipe->plane_state->status.is_flip_pending = false;
1900                 dc->hwss.update_pending_status(pipe);
1901                 if (pipe->plane_state->status.is_flip_pending)
1902                         return true;
1903         }
1904         return false;
1905 }
1906
1907 #ifdef CONFIG_DRM_AMD_DC_DCN
1908 /* Perform updates here which need to be deferred until next vupdate
1909  *
1910  * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
1911  * but forcing lut memory to shutdown state is immediate. This causes
1912  * single frame corruption as lut gets disabled mid-frame unless shutdown
1913  * is deferred until after entering bypass.
1914  */
1915 static void process_deferred_updates(struct dc *dc)
1916 {
1917         int i = 0;
1918
1919         if (dc->debug.enable_mem_low_power.bits.cm) {
1920                 ASSERT(dc->dcn_ip->max_num_dpp);
1921                 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
1922                         if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
1923                                 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
1924         }
1925 }
1926 #endif /* CONFIG_DRM_AMD_DC_DCN */
1927
/* Post-commit cleanup: disable now-unused pipes and lower clocks once no
 * flips are pending. No-op unless a prior update marked optimization as
 * required, and deferred while seamless-boot streams are still active.
 */
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	/* Bail and retry later while any flip is still in flight. */
	if (is_flip_pending_in_pipes(dc, context))
		return;

	/* Disable pipes that no longer have both a stream and a plane. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

#ifdef CONFIG_DRM_AMD_DC_DCN
	process_deferred_updates(dc);
#endif

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}
1962
/* One-time initialization of a freshly allocated dc_state. */
static void init_state(struct dc *dc, struct dc_state *context)
{
	/* Each context must have their own instance of VBA and in order to
	 * initialize and obtain IP and SOC the base DML instance from DC is
	 * initially copied into every context
	 */
#ifdef CONFIG_DRM_AMD_DC_DCN
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
	/* DCE-only builds need no per-context DML copy. */
}
1973
1974 struct dc_state *dc_create_state(struct dc *dc)
1975 {
1976         struct dc_state *context = kvzalloc(sizeof(struct dc_state),
1977                                             GFP_KERNEL);
1978
1979         if (!context)
1980                 return NULL;
1981
1982         init_state(dc, context);
1983
1984         kref_init(&context->refcount);
1985
1986         return context;
1987 }
1988
1989 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
1990 {
1991         int i, j;
1992         struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
1993
1994         if (!new_ctx)
1995                 return NULL;
1996         memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
1997
1998         for (i = 0; i < MAX_PIPES; i++) {
1999                         struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2000
2001                         if (cur_pipe->top_pipe)
2002                                 cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2003
2004                         if (cur_pipe->bottom_pipe)
2005                                 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2006
2007                         if (cur_pipe->prev_odm_pipe)
2008                                 cur_pipe->prev_odm_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2009
2010                         if (cur_pipe->next_odm_pipe)
2011                                 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2012
2013         }
2014
2015         for (i = 0; i < new_ctx->stream_count; i++) {
2016                         dc_stream_retain(new_ctx->streams[i]);
2017                         for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2018                                 dc_plane_state_retain(
2019                                         new_ctx->stream_status[i].plane_states[j]);
2020         }
2021
2022         kref_init(&new_ctx->refcount);
2023
2024         return new_ctx;
2025 }
2026
/* Take an extra reference on a dc_state; pair with dc_release_state(). */
void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}
2031
/* kref release callback: destruct the state's resources and free it. */
static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kvfree(context);
}
2038
/* Drop a reference; the last put frees the state via dc_state_free(). */
void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}
2043
2044 bool dc_set_generic_gpio_for_stereo(bool enable,
2045                 struct gpio_service *gpio_service)
2046 {
2047         enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2048         struct gpio_pin_info pin_info;
2049         struct gpio *generic;
2050         struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2051                            GFP_KERNEL);
2052
2053         if (!config)
2054                 return false;
2055         pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2056
2057         if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2058                 kfree(config);
2059                 return false;
2060         } else {
2061                 generic = dal_gpio_service_create_generic_mux(
2062                         gpio_service,
2063                         pin_info.offset,
2064                         pin_info.mask);
2065         }
2066
2067         if (!generic) {
2068                 kfree(config);
2069                 return false;
2070         }
2071
2072         gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2073
2074         config->enable_output_from_mux = enable;
2075         config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2076
2077         if (gpio_result == GPIO_RESULT_OK)
2078                 gpio_result = dal_mux_setup_config(generic, config);
2079
2080         if (gpio_result == GPIO_RESULT_OK) {
2081                 dal_gpio_close(generic);
2082                 dal_gpio_destroy_generic_mux(&generic);
2083                 kfree(config);
2084                 return true;
2085         } else {
2086                 dal_gpio_close(generic);
2087                 dal_gpio_destroy_generic_mux(&generic);
2088                 kfree(config);
2089                 return false;
2090         }
2091 }
2092
2093 static bool is_surface_in_context(
2094                 const struct dc_state *context,
2095                 const struct dc_plane_state *plane_state)
2096 {
2097         int j;
2098
2099         for (j = 0; j < MAX_PIPES; j++) {
2100                 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2101
2102                 if (plane_state == pipe_ctx->plane_state) {
2103                         return true;
2104                 }
2105         }
2106
2107         return false;
2108 }
2109
2110 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2111 {
2112         union surface_update_flags *update_flags = &u->surface->update_flags;
2113         enum surface_update_type update_type = UPDATE_TYPE_FAST;
2114
2115         if (!u->plane_info)
2116                 return UPDATE_TYPE_FAST;
2117
2118         if (u->plane_info->color_space != u->surface->color_space) {
2119                 update_flags->bits.color_space_change = 1;
2120                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2121         }
2122
2123         if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2124                 update_flags->bits.horizontal_mirror_change = 1;
2125                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2126         }
2127
2128         if (u->plane_info->rotation != u->surface->rotation) {
2129                 update_flags->bits.rotation_change = 1;
2130                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2131         }
2132
2133         if (u->plane_info->format != u->surface->format) {
2134                 update_flags->bits.pixel_format_change = 1;
2135                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2136         }
2137
2138         if (u->plane_info->stereo_format != u->surface->stereo_format) {
2139                 update_flags->bits.stereo_format_change = 1;
2140                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2141         }
2142
2143         if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2144                 update_flags->bits.per_pixel_alpha_change = 1;
2145                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2146         }
2147
2148         if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2149                 update_flags->bits.global_alpha_change = 1;
2150                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2151         }
2152
2153         if (u->plane_info->dcc.enable != u->surface->dcc.enable
2154                         || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2155                         || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2156                 /* During DCC on/off, stutter period is calculated before
2157                  * DCC has fully transitioned. This results in incorrect
2158                  * stutter period calculation. Triggering a full update will
2159                  * recalculate stutter period.
2160                  */
2161                 update_flags->bits.dcc_change = 1;
2162                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2163         }
2164
2165         if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2166                         resource_pixel_format_to_bpp(u->surface->format)) {
2167                 /* different bytes per element will require full bandwidth
2168                  * and DML calculation
2169                  */
2170                 update_flags->bits.bpp_change = 1;
2171                 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2172         }
2173
2174         if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2175                         || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2176                 update_flags->bits.plane_size_change = 1;
2177                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2178         }
2179
2180
2181         if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2182                         sizeof(union dc_tiling_info)) != 0) {
2183                 update_flags->bits.swizzle_change = 1;
2184                 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2185
2186                 /* todo: below are HW dependent, we should add a hook to
2187                  * DCE/N resource and validated there.
2188                  */
2189                 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2190                         /* swizzled mode requires RQ to be setup properly,
2191                          * thus need to run DML to calculate RQ settings
2192                          */
2193                         update_flags->bits.bandwidth_change = 1;
2194                         elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2195                 }
2196         }
2197
2198         /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2199         return update_type;
2200 }
2201
2202 static enum surface_update_type get_scaling_info_update_type(
2203                 const struct dc_surface_update *u)
2204 {
2205         union surface_update_flags *update_flags = &u->surface->update_flags;
2206
2207         if (!u->scaling_info)
2208                 return UPDATE_TYPE_FAST;
2209
2210         if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2211                         || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2212                         || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2213                         || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2214                         || u->scaling_info->scaling_quality.integer_scaling !=
2215                                 u->surface->scaling_quality.integer_scaling
2216                         ) {
2217                 update_flags->bits.scaling_change = 1;
2218
2219                 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2220                         || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2221                                 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2222                                         || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2223                         /* Making dst rect smaller requires a bandwidth change */
2224                         update_flags->bits.bandwidth_change = 1;
2225         }
2226
2227         if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2228                 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2229
2230                 update_flags->bits.scaling_change = 1;
2231                 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2232                                 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2233                         /* Making src rect bigger requires a bandwidth change */
2234                         update_flags->bits.clock_change = 1;
2235         }
2236
2237         if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2238                         || u->scaling_info->src_rect.y != u->surface->src_rect.y
2239                         || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2240                         || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2241                         || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2242                         || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2243                 update_flags->bits.position_change = 1;
2244
2245         if (update_flags->bits.clock_change
2246                         || update_flags->bits.bandwidth_change
2247                         || update_flags->bits.scaling_change)
2248                 return UPDATE_TYPE_FULL;
2249
2250         if (update_flags->bits.position_change)
2251                 return UPDATE_TYPE_MED;
2252
2253         return UPDATE_TYPE_FAST;
2254 }
2255
2256 static enum surface_update_type det_surface_update(const struct dc *dc,
2257                 const struct dc_surface_update *u)
2258 {
2259         const struct dc_state *context = dc->current_state;
2260         enum surface_update_type type;
2261         enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2262         union surface_update_flags *update_flags = &u->surface->update_flags;
2263
2264         if (u->flip_addr)
2265                 update_flags->bits.addr_update = 1;
2266
2267         if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2268                 update_flags->raw = 0xFFFFFFFF;
2269                 return UPDATE_TYPE_FULL;
2270         }
2271
2272         update_flags->raw = 0; // Reset all flags
2273
2274         type = get_plane_info_update_type(u);
2275         elevate_update_type(&overall_type, type);
2276
2277         type = get_scaling_info_update_type(u);
2278         elevate_update_type(&overall_type, type);
2279
2280         if (u->flip_addr)
2281                 update_flags->bits.addr_update = 1;
2282
2283         if (u->in_transfer_func)
2284                 update_flags->bits.in_transfer_func_change = 1;
2285
2286         if (u->input_csc_color_matrix)
2287                 update_flags->bits.input_csc_change = 1;
2288
2289         if (u->coeff_reduction_factor)
2290                 update_flags->bits.coeff_reduction_change = 1;
2291
2292         if (u->gamut_remap_matrix)
2293                 update_flags->bits.gamut_remap_change = 1;
2294
2295         if (u->gamma) {
2296                 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2297
2298                 if (u->plane_info)
2299                         format = u->plane_info->format;
2300                 else if (u->surface)
2301                         format = u->surface->format;
2302
2303                 if (dce_use_lut(format))
2304                         update_flags->bits.gamma_change = 1;
2305         }
2306
2307         if (u->lut3d_func || u->func_shaper)
2308                 update_flags->bits.lut_3d = 1;
2309
2310         if (u->hdr_mult.value)
2311                 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2312                         update_flags->bits.hdr_mult = 1;
2313                         elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2314                 }
2315
2316         if (update_flags->bits.in_transfer_func_change) {
2317                 type = UPDATE_TYPE_MED;
2318                 elevate_update_type(&overall_type, type);
2319         }
2320
2321         if (update_flags->bits.input_csc_change
2322                         || update_flags->bits.coeff_reduction_change
2323                         || update_flags->bits.lut_3d
2324                         || update_flags->bits.gamma_change
2325                         || update_flags->bits.gamut_remap_change) {
2326                 type = UPDATE_TYPE_FULL;
2327                 elevate_update_type(&overall_type, type);
2328         }
2329
2330         return overall_type;
2331 }
2332
/* Compute the overall update type for a set of surface updates plus an
 * optional stream update; the result is the maximum severity of all parts.
 */
static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* Exiting idle optimizations always requires full reprogramming. */
	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

#endif
	/* Changed plane count (or missing status) is a topology change. */
	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->pending_test_pattern) {
		overall_type = UPDATE_TYPE_FULL;
	}

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (stream_update->mst_bw_update)
			su_flags->bits.mst_bw = 1;
#endif

		/* Any stream flag set above forces a full update.
		 * NOTE(review): out_csc is set only after this check, so on
		 * its own it does not elevate the type here — confirm that
		 * ordering is intentional.
		 */
		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	/* Fold in each surface's own update type. */
	for (i = 0 ; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}
2403
2404 /*
2405  * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2406  *
2407  * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2408  */
2409 enum surface_update_type dc_check_update_surfaces_for_stream(
2410                 struct dc *dc,
2411                 struct dc_surface_update *updates,
2412                 int surface_count,
2413                 struct dc_stream_update *stream_update,
2414                 const struct dc_stream_status *stream_status)
2415 {
2416         int i;
2417         enum surface_update_type type;
2418
2419         if (stream_update)
2420                 stream_update->stream->update_flags.raw = 0;
2421         for (i = 0; i < surface_count; i++)
2422                 updates[i].surface->update_flags.raw = 0;
2423
2424         type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2425         if (type == UPDATE_TYPE_FULL) {
2426                 if (stream_update) {
2427                         uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2428                         stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2429                         stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2430                 }
2431                 for (i = 0; i < surface_count; i++)
2432                         updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2433         }
2434
2435         if (type == UPDATE_TYPE_FAST) {
2436                 // If there's an available clock comparator, we use that.
2437                 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2438                         if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2439                                 dc->optimized_required = true;
2440                 // Else we fallback to mem compare.
2441                 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2442                         dc->optimized_required = true;
2443                 }
2444
2445                 dc->optimized_required |= dc->wm_optimized_required;
2446         }
2447
2448         return type;
2449 }
2450
2451 static struct dc_stream_status *stream_get_status(
2452         struct dc_state *ctx,
2453         struct dc_stream_state *stream)
2454 {
2455         uint8_t i;
2456
2457         for (i = 0; i < ctx->stream_count; i++) {
2458                 if (stream == ctx->streams[i]) {
2459                         return &ctx->stream_status[i];
2460                 }
2461         }
2462
2463         return NULL;
2464 }
2465
/* NOTE(review): appears to set the verbosity threshold for surface-update
 * tracing — the consumer is not in this chunk; confirm against its users.
 */
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2467
/*
 * copy_surface_update_to_plane() - Apply the populated fields of @srf_update
 * to @surface.
 *
 * Every member of dc_surface_update is optional (NULL means "unchanged");
 * only non-NULL entries are copied into the plane state. Gamma, transfer
 * funcs and LUTs are deep-copied, so the caller's update structures need not
 * outlive this call.
 */
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		/* Record per-flip elapsed time in a small ring buffer. */
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	/* The self-compare guards below skip the deep copy when the caller
	 * passed the plane's own object back as the update source.
	 */
	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
		sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
		sizeof(*surface->lut3d_func));

	/* hdr_mult is a value (fixed-point), not a pointer: nonzero means set. */
	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
		sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
}
2590
/*
 * copy_stream_update_to_stream() - Apply the populated fields of @update to
 * @stream.
 *
 * As with surface updates, each member of dc_stream_update is optional and
 * only non-NULL entries are copied. A DSC config change is first validated
 * for bandwidth against a temporary copy of the current state; on validation
 * (or allocation) failure the old DSC config is restored and
 * update->dsc_config is set to NULL, signalling callers not to program DSC.
 */
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	/* Deep-copy the output transfer func unless the caller passed the
	 * stream's own object back as the source.
	 */
	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;
	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		/* DSC is enabled iff the new config has a nonzero slice grid. */
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use temporarry context for validating new DSC config */
		struct dc_state *dsc_validate_context = dc_create_state(dc);

		if (dsc_validate_context) {
			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);

			/* Apply the new config to the stream first, then check
			 * bandwidth; roll back both fields if it doesn't fit.
			 */
			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_release_state(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}
2698
/*
 * commit_planes_do_stream_update() - Program stream-level changes on every
 * top-level pipe driving @stream in @context.
 *
 * Info frames, gamut remap, output CSC and dither are applied regardless of
 * update type; everything after the UPDATE_TYPE_FAST early-continue (DSC,
 * MST bandwidth, test patterns, DPMS, ABM) runs only for MED/FULL updates.
 */
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		/* Only program via the top pipe of each MPC/ODM tree. */
		if (!pipe_ctx->top_pipe &&  !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);

			/* Rebuild and resend info frames; static HDR metadata
			 * goes this route only when dynamic metadata is off.
			 */
			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}

			/* Dynamic metadata path: requires a programmed dmdata address. */
			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
				/* Reprogram FMT bit-depth reduction on this pipe
				 * and on every ODM slice chained behind it.
				 */
				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
									&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}


			/* Full fe update*/
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dp_update_dsc_config(pipe_ctx);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (stream_update->mst_bw_update) {
				if (stream_update->mst_bw_update->is_increase)
					dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
				else
					dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
			}
#endif

			if (stream_update->pending_test_pattern) {
				dc_link_dp_set_test_pattern(stream->link,
					stream->test_pattern.type,
					stream->test_pattern.color_space,
					stream->test_pattern.p_link_settings,
					stream->test_pattern.p_custom_pattern,
					stream->test_pattern.cust_pattern_size);
			}

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources*/
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->optimized_required = true;

				} else {
					if (get_seamless_boot_stream_count(context) == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				bool should_program_abm = true;

				// if otg funcs defined check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}
2819
2820 static void commit_planes_for_stream(struct dc *dc,
2821                 struct dc_surface_update *srf_updates,
2822                 int surface_count,
2823                 struct dc_stream_state *stream,
2824                 struct dc_stream_update *stream_update,
2825                 enum surface_update_type update_type,
2826                 struct dc_state *context)
2827 {
2828         int i, j;
2829         struct pipe_ctx *top_pipe_to_program = NULL;
2830         bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
2831
2832 #if defined(CONFIG_DRM_AMD_DC_DCN)
2833         dc_z10_restore(dc);
2834 #endif
2835
2836         if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
2837                 /* Optimize seamless boot flag keeps clocks and watermarks high until
2838                  * first flip. After first flip, optimization is required to lower
2839                  * bandwidth. Important to note that it is expected UEFI will
2840                  * only light up a single display on POST, therefore we only expect
2841                  * one stream with seamless boot flag set.
2842                  */
2843                 if (stream->apply_seamless_boot_optimization) {
2844                         stream->apply_seamless_boot_optimization = false;
2845
2846                         if (get_seamless_boot_stream_count(context) == 0)
2847                                 dc->optimized_required = true;
2848                 }
2849         }
2850
2851         if (update_type == UPDATE_TYPE_FULL) {
2852 #if defined(CONFIG_DRM_AMD_DC_DCN)
2853                 dc_allow_idle_optimizations(dc, false);
2854
2855 #endif
2856                 if (get_seamless_boot_stream_count(context) == 0)
2857                         dc->hwss.prepare_bandwidth(dc, context);
2858
2859                 context_clock_trace(dc, context);
2860         }
2861
2862         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2863                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2864
2865                 if (!pipe_ctx->top_pipe &&
2866                         !pipe_ctx->prev_odm_pipe &&
2867                         pipe_ctx->stream &&
2868                         pipe_ctx->stream == stream) {
2869                         top_pipe_to_program = pipe_ctx;
2870                 }
2871         }
2872
2873 #ifdef CONFIG_DRM_AMD_DC_DCN
2874         if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
2875                 struct pipe_ctx *mpcc_pipe;
2876                 struct pipe_ctx *odm_pipe;
2877
2878                 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
2879                         for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
2880                                 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
2881         }
2882 #endif
2883
2884         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
2885                 if (top_pipe_to_program &&
2886                         top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
2887                         if (should_use_dmub_lock(stream->link)) {
2888                                 union dmub_hw_lock_flags hw_locks = { 0 };
2889                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
2890
2891                                 hw_locks.bits.lock_dig = 1;
2892                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
2893
2894                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
2895                                                         true,
2896                                                         &hw_locks,
2897                                                         &inst_flags);
2898                         } else
2899                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
2900                                                 top_pipe_to_program->stream_res.tg);
2901                 }
2902
2903         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2904                 dc->hwss.interdependent_update_lock(dc, context, true);
2905         else
2906                 /* Lock the top pipe while updating plane addrs, since freesync requires
2907                  *  plane addr update event triggers to be synchronized.
2908                  *  top_pipe_to_program is expected to never be NULL
2909                  */
2910                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
2911
2912         // Stream updates
2913         if (stream_update)
2914                 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
2915
2916         if (surface_count == 0) {
2917                 /*
2918                  * In case of turning off screen, no need to program front end a second time.
2919                  * just return after program blank.
2920                  */
2921                 if (dc->hwss.apply_ctx_for_surface)
2922                         dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
2923                 if (dc->hwss.program_front_end_for_ctx)
2924                         dc->hwss.program_front_end_for_ctx(dc, context);
2925
2926                 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
2927                         dc->hwss.interdependent_update_lock(dc, context, false);
2928                 else
2929                         dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
2930                 dc->hwss.post_unlock_program_front_end(dc, context);
2931                 return;
2932         }
2933
2934         if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2935                 for (i = 0; i < surface_count; i++) {
2936                         struct dc_plane_state *plane_state = srf_updates[i].surface;
2937                         /*set logical flag for lock/unlock use*/
2938                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2939                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2940                                 if (!pipe_ctx->plane_state)
2941                                         continue;
2942                                 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
2943                                         continue;
2944                                 pipe_ctx->plane_state->triplebuffer_flips = false;
2945                                 if (update_type == UPDATE_TYPE_FAST &&
2946                                         dc->hwss.program_triplebuffer != NULL &&
2947                                         !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
2948                                                 /*triple buffer for VUpdate  only*/
2949                                                 pipe_ctx->plane_state->triplebuffer_flips = true;
2950                                 }
2951                         }
2952                         if (update_type == UPDATE_TYPE_FULL) {
2953                                 /* force vsync flip when reconfiguring pipes to prevent underflow */
2954                                 plane_state->flip_immediate = false;
2955                         }
2956                 }
2957         }
2958
2959         // Update Type FULL, Surface updates
2960         for (j = 0; j < dc->res_pool->pipe_count; j++) {
2961                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2962
2963                 if (!pipe_ctx->top_pipe &&
2964                         !pipe_ctx->prev_odm_pipe &&
2965                         should_update_pipe_for_stream(context, pipe_ctx, stream)) {
2966                         struct dc_stream_status *stream_status = NULL;
2967
2968                         if (!pipe_ctx->plane_state)
2969                                 continue;
2970
2971                         /* Full fe update*/
2972                         if (update_type == UPDATE_TYPE_FAST)
2973                                 continue;
2974
2975                         ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
2976
2977                         if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
2978                                 /*turn off triple buffer for full update*/
2979                                 dc->hwss.program_triplebuffer(
2980                                         dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
2981                         }
2982                         stream_status =
2983                                 stream_get_status(context, pipe_ctx->stream);
2984
2985                         if (dc->hwss.apply_ctx_for_surface)
2986                                 dc->hwss.apply_ctx_for_surface(
2987                                         dc, pipe_ctx->stream, stream_status->plane_count, context);
2988                 }
2989         }
2990         if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
2991                 dc->hwss.program_front_end_for_ctx(dc, context);
2992 #ifdef CONFIG_DRM_AMD_DC_DCN
2993                 if (dc->debug.validate_dml_output) {
2994                         for (i = 0; i < dc->res_pool->pipe_count; i++) {
2995                                 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
2996                                 if (cur_pipe->stream == NULL)
2997                                         continue;
2998
2999                                 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3000                                                 cur_pipe->plane_res.hubp, dc->ctx,
3001                                                 &context->res_ctx.pipe_ctx[i].rq_regs,
3002                                                 &context->res_ctx.pipe_ctx[i].dlg_regs,
3003                                                 &context->res_ctx.pipe_ctx[i].ttu_regs);
3004                         }
3005                 }
3006 #endif
3007         }
3008
3009         // Update Type FAST, Surface updates
3010         if (update_type == UPDATE_TYPE_FAST) {
3011                 if (dc->hwss.set_flip_control_gsl)
3012                         for (i = 0; i < surface_count; i++) {
3013                                 struct dc_plane_state *plane_state = srf_updates[i].surface;
3014
3015                                 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3016                                         struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3017
3018                                         if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3019                                                 continue;
3020
3021                                         if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3022                                                 continue;
3023
3024                                         // GSL has to be used for flip immediate
3025                                         dc->hwss.set_flip_control_gsl(pipe_ctx,
3026                                                         pipe_ctx->plane_state->flip_immediate);
3027                                 }
3028                         }
3029
3030                 /* Perform requested Updates */
3031                 for (i = 0; i < surface_count; i++) {
3032                         struct dc_plane_state *plane_state = srf_updates[i].surface;
3033
3034                         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3035                                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3036
3037                                 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3038                                         continue;
3039
3040                                 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3041                                         continue;
3042
3043                                 /*program triple buffer after lock based on flip type*/
3044                                 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3045                                         /*only enable triplebuffer for  fast_update*/
3046                                         dc->hwss.program_triplebuffer(
3047                                                 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3048                                 }
3049                                 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3050                                         dc->hwss.update_plane_addr(dc, pipe_ctx);
3051                         }
3052                 }
3053
3054         }
3055
3056         if (should_lock_all_pipes && dc->hwss.interdependent_update_lock)
3057                 dc->hwss.interdependent_update_lock(dc, context, false);
3058         else
3059                 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3060
3061         if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3062                 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3063                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3064                                         top_pipe_to_program->stream_res.tg,
3065                                         CRTC_STATE_VACTIVE);
3066                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3067                                         top_pipe_to_program->stream_res.tg,
3068                                         CRTC_STATE_VBLANK);
3069                         top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3070                                         top_pipe_to_program->stream_res.tg,
3071                                         CRTC_STATE_VACTIVE);
3072
3073                         if (stream && should_use_dmub_lock(stream->link)) {
3074                                 union dmub_hw_lock_flags hw_locks = { 0 };
3075                                 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3076
3077                                 hw_locks.bits.lock_dig = 1;
3078                                 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3079
3080                                 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3081                                                         false,
3082                                                         &hw_locks,
3083                                                         &inst_flags);
3084                         } else
3085                                 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3086                                         top_pipe_to_program->stream_res.tg);
3087                 }
3088
3089         if (update_type != UPDATE_TYPE_FAST)
3090                 dc->hwss.post_unlock_program_front_end(dc, context);
3091
3092         // Fire manual trigger only when bottom plane is flipped
3093         for (j = 0; j < dc->res_pool->pipe_count; j++) {
3094                 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3095
3096                 if (!pipe_ctx->plane_state)
3097                         continue;
3098
3099                 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3100                                 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3101                                 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3102                                 pipe_ctx->plane_state->skip_manual_trigger)
3103                         continue;
3104
3105                 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3106                         pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3107         }
3108 }
3109
/**
 * dc_commit_updates_for_stream - Validate and program a set of surface/stream
 * updates for one stream.
 * @dc: display core instance
 * @srf_updates: array of surface updates to apply
 * @surface_count: number of entries in @srf_updates
 * @stream: the stream the surfaces belong to
 * @stream_update: stream-level updates (may carry timing/info-frame changes)
 * @state: the dc_state to copy-construct the new context from on a full update
 *
 * Classifies the update (FAST/MED/FULL), builds and validates a new context
 * when a full update is required, then hands off to
 * commit_planes_for_stream(). On a full update the new context replaces
 * dc->current_state and the old one is released.
 */
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	/* FAST/MED updates are applied on top of the current state */
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		/* Any pipe that picked up a different plane must be fully
		 * reprogrammed, not just flipped.
		 */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 *
		 * Only relevant for DCN behavior where we can guarantee the optimization
		 * is safe to apply - retain the legacy behavior for DCE.
		 */
		dc_post_update_surfaces_to_stream(dc);
	}


	/* Copy the requested updates into the plane states and, for MED+
	 * updates, rebuild scaling parameters for the affected pipes.
	 */
	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	/* A full update needs bandwidth re-validation; bail out and drop the
	 * scratch context if the new configuration does not fit.
	 */
	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	/* NOTE(review): pipe_ctx is not in scope here; this only compiles if
	 * TRACE_DC_PIPE_STATE ignores its first argument — confirm the macro.
	 */
	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/*update current_State*/
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		/* force_full_update served its purpose for this commit */
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* Legacy optimization path for DCE. */
	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
		dc_post_update_surfaces_to_stream(dc);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}

	return;

}
3225
3226 uint8_t dc_get_current_stream_count(struct dc *dc)
3227 {
3228         return dc->current_state->stream_count;
3229 }
3230
3231 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3232 {
3233         if (i < dc->current_state->stream_count)
3234                 return dc->current_state->streams[i];
3235         return NULL;
3236 }
3237
3238 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
3239 {
3240         uint8_t i;
3241         struct dc_context *ctx = link->ctx;
3242
3243         for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
3244                 if (ctx->dc->current_state->streams[i]->link == link)
3245                         return ctx->dc->current_state->streams[i];
3246         }
3247
3248         return NULL;
3249 }
3250
3251 enum dc_irq_source dc_interrupt_to_irq_source(
3252                 struct dc *dc,
3253                 uint32_t src_id,
3254                 uint32_t ext_id)
3255 {
3256         return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3257 }
3258
3259 /*
3260  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3261  */
3262 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3263 {
3264
3265         if (dc == NULL)
3266                 return false;
3267
3268         return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3269 }
3270
3271 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3272 {
3273         dal_irq_service_ack(dc->res_pool->irqs, src);
3274 }
3275
3276 void dc_power_down_on_boot(struct dc *dc)
3277 {
3278         if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3279                         dc->hwss.power_down_on_boot)
3280                 dc->hwss.power_down_on_boot(dc);
3281 }
3282
3283 void dc_set_power_state(
3284         struct dc *dc,
3285         enum dc_acpi_cm_power_state power_state)
3286 {
3287         struct kref refcount;
3288         struct display_mode_lib *dml;
3289
3290         if (!dc->current_state)
3291                 return;
3292
3293         switch (power_state) {
3294         case DC_ACPI_CM_POWER_STATE_D0:
3295                 dc_resource_state_construct(dc, dc->current_state);
3296
3297 #if defined(CONFIG_DRM_AMD_DC_DCN)
3298                 dc_z10_restore(dc);
3299 #endif
3300                 if (dc->ctx->dmub_srv)
3301                         dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3302
3303                 dc->hwss.init_hw(dc);
3304
3305                 if (dc->hwss.init_sys_ctx != NULL &&
3306                         dc->vm_pa_config.valid) {
3307                         dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3308                 }
3309
3310                 break;
3311         default:
3312                 ASSERT(dc->current_state->stream_count == 0);
3313                 /* Zero out the current context so that on resume we start with
3314                  * clean state, and dc hw programming optimizations will not
3315                  * cause any trouble.
3316                  */
3317                 dml = kzalloc(sizeof(struct display_mode_lib),
3318                                 GFP_KERNEL);
3319
3320                 ASSERT(dml);
3321                 if (!dml)
3322                         return;
3323
3324                 /* Preserve refcount */
3325                 refcount = dc->current_state->refcount;
3326                 /* Preserve display mode lib */
3327                 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3328
3329                 dc_resource_state_destruct(dc->current_state);
3330                 memset(dc->current_state, 0,
3331                                 sizeof(*dc->current_state));
3332
3333                 dc->current_state->refcount = refcount;
3334                 dc->current_state->bw_ctx.dml = *dml;
3335
3336                 kfree(dml);
3337
3338                 break;
3339         }
3340 }
3341
3342 void dc_resume(struct dc *dc)
3343 {
3344         uint32_t i;
3345
3346         for (i = 0; i < dc->link_count; i++)
3347                 core_link_resume(dc->links[i]);
3348 }
3349
3350 bool dc_is_dmcu_initialized(struct dc *dc)
3351 {
3352         struct dmcu *dmcu = dc->res_pool->dmcu;
3353
3354         if (dmcu)
3355                 return dmcu->funcs->is_dmcu_initialized(dmcu);
3356         return false;
3357 }
3358
3359 bool dc_submit_i2c(
3360                 struct dc *dc,
3361                 uint32_t link_index,
3362                 struct i2c_command *cmd)
3363 {
3364
3365         struct dc_link *link = dc->links[link_index];
3366         struct ddc_service *ddc = link->ddc;
3367         return dce_i2c_submit_command(
3368                 dc->res_pool,
3369                 ddc->ddc_pin,
3370                 cmd);
3371 }
3372
3373 bool dc_submit_i2c_oem(
3374                 struct dc *dc,
3375                 struct i2c_command *cmd)
3376 {
3377         struct ddc_service *ddc = dc->res_pool->oem_device;
3378         return dce_i2c_submit_command(
3379                 dc->res_pool,
3380                 ddc->ddc_pin,
3381                 cmd);
3382 }
3383
3384 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3385 {
3386         if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3387                 BREAK_TO_DEBUGGER();
3388                 return false;
3389         }
3390
3391         dc_sink_retain(sink);
3392
3393         dc_link->remote_sinks[dc_link->sink_count] = sink;
3394         dc_link->sink_count++;
3395
3396         return true;
3397 }
3398
3399 /*
3400  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3401  *
3402  * EDID length is in bytes
3403  */
3404 struct dc_sink *dc_link_add_remote_sink(
3405                 struct dc_link *link,
3406                 const uint8_t *edid,
3407                 int len,
3408                 struct dc_sink_init_data *init_data)
3409 {
3410         struct dc_sink *dc_sink;
3411         enum dc_edid_status edid_status;
3412
3413         if (len > DC_MAX_EDID_BUFFER_SIZE) {
3414                 dm_error("Max EDID buffer size breached!\n");
3415                 return NULL;
3416         }
3417
3418         if (!init_data) {
3419                 BREAK_TO_DEBUGGER();
3420                 return NULL;
3421         }
3422
3423         if (!init_data->link) {
3424                 BREAK_TO_DEBUGGER();
3425                 return NULL;
3426         }
3427
3428         dc_sink = dc_sink_create(init_data);
3429
3430         if (!dc_sink)
3431                 return NULL;
3432
3433         memmove(dc_sink->dc_edid.raw_edid, edid, len);
3434         dc_sink->dc_edid.length = len;
3435
3436         if (!link_add_remote_sink_helper(
3437                         link,
3438                         dc_sink))
3439                 goto fail_add_sink;
3440
3441         edid_status = dm_helpers_parse_edid_caps(
3442                         link,
3443                         &dc_sink->dc_edid,
3444                         &dc_sink->edid_caps);
3445
3446         /*
3447          * Treat device as no EDID device if EDID
3448          * parsing fails
3449          */
3450         if (edid_status != EDID_OK) {
3451                 dc_sink->dc_edid.length = 0;
3452                 dm_error("Bad EDID, status%d!\n", edid_status);
3453         }
3454
3455         return dc_sink;
3456
3457 fail_add_sink:
3458         dc_sink_release(dc_sink);
3459         return NULL;
3460 }
3461
3462 /*
3463  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3464  *
3465  * Note that this just removes the struct dc_sink - it doesn't
3466  * program hardware or alter other members of dc_link
3467  */
3468 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3469 {
3470         int i;
3471
3472         if (!link->sink_count) {
3473                 BREAK_TO_DEBUGGER();
3474                 return;
3475         }
3476
3477         for (i = 0; i < link->sink_count; i++) {
3478                 if (link->remote_sinks[i] == sink) {
3479                         dc_sink_release(sink);
3480                         link->remote_sinks[i] = NULL;
3481
3482                         /* shrink array to remove empty place */
3483                         while (i < link->sink_count - 1) {
3484                                 link->remote_sinks[i] = link->remote_sinks[i+1];
3485                                 i++;
3486                         }
3487                         link->remote_sinks[i] = NULL;
3488                         link->sink_count--;
3489                         return;
3490                 }
3491         }
3492 }
3493
3494 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3495 {
3496         info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3497         info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3498         info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3499         info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3500         info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3501         info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3502         info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3503         info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3504         info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3505 }
3506 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3507 {
3508         if (dc->hwss.set_clock)
3509                 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3510         return DC_ERROR_UNEXPECTED;
3511 }
3512 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3513 {
3514         if (dc->hwss.get_clock)
3515                 dc->hwss.get_clock(dc, clock_type, clock_cfg);
3516 }
3517
3518 /* enable/disable eDP PSR without specify stream for eDP */
3519 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3520 {
3521         int i;
3522         bool allow_active;
3523
3524         for (i = 0; i < dc->current_state->stream_count ; i++) {
3525                 struct dc_link *link;
3526                 struct dc_stream_state *stream = dc->current_state->streams[i];
3527
3528                 link = stream->link;
3529                 if (!link)
3530                         continue;
3531
3532                 if (link->psr_settings.psr_feature_enabled) {
3533                         if (enable && !link->psr_settings.psr_allow_active) {
3534                                 allow_active = true;
3535                                 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
3536                                         return false;
3537                         } else if (!enable && link->psr_settings.psr_allow_active) {
3538                                 allow_active = false;
3539                                 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
3540                                         return false;
3541                         }
3542                 }
3543         }
3544
3545         return true;
3546 }
3547
3548 #if defined(CONFIG_DRM_AMD_DC_DCN)
3549
3550 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3551 {
3552         if (dc->debug.disable_idle_power_optimizations)
3553                 return;
3554
3555         if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
3556                 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3557                         return;
3558
3559         if (allow == dc->idle_optimizations_allowed)
3560                 return;
3561
3562         if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3563                 dc->idle_optimizations_allowed = allow;
3564 }
3565
3566 /*
3567  * blank all streams, and set min and max memory clock to
3568  * lowest and highest DPM level, respectively
3569  */
3570 void dc_unlock_memory_clock_frequency(struct dc *dc)
3571 {
3572         unsigned int i;
3573
3574         for (i = 0; i < MAX_PIPES; i++)
3575                 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3576                         core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3577
3578         dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3579         dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3580 }
3581
3582 /*
3583  * set min memory clock to the min required for current mode,
3584  * max to maxDPM, and unblank streams
3585  */
3586 void dc_lock_memory_clock_frequency(struct dc *dc)
3587 {
3588         unsigned int i;
3589
3590         dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3591         dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3592         dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3593
3594         for (i = 0; i < MAX_PIPES; i++)
3595                 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3596                         core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3597 }
3598
/* Blank all active pipes, force the memory clock to @memclk_mhz, then
 * unblank. The double-buffer waits ensure the blank has latched before the
 * clock is changed.
 *
 * NOTE(review): the @apply parameter is never read in this body — confirm
 * whether it is vestigial or a missing condition.
 */
static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
{
	struct dc_state *context = dc->current_state;
	struct hubp *hubp;
	struct pipe_ctx *pipe;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, true);

			// wait for double buffer
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, true);
		}
	}

	/* clamp both bounds to the requested frequency while blanked */
	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream != NULL) {
			dc->hwss.disable_pixel_data(dc, pipe, false);

			hubp = pipe->plane_res.hubp;
			hubp->funcs->set_blank_regs(hubp, false);
		}
	}
}
3636
3637
/**
 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
 * @dc: pointer to dc of the dm calling this
 * @enable: True = transition to DC mode, false = transition back to AC mode
 *
 * Some SoCs define additional clock limits when in DC mode, DM should
 * invoke this function when the platform undergoes a power source transition
 * so DC can apply/unapply the limit. This interface may be disruptive to
 * the onscreen content.
 *
 * Context: Triggered by OS through DM interface, or manually by escape calls.
 * Need to hold a dclock when doing so.
 *
 * Return: none (void function)
 *
 */
void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
{
	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
	unsigned int softMax, maxDPM, funcMin;
	bool p_state_change_support;

	/* the DC-mode softmax limit only exists on Beige Goby */
	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
		return;

	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
	/* highest DPM entry in the clock table */
	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
	/* current functional minimum, kHz rounded up to MHz */
	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;

	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
		/* with p-state support the clock can change seamlessly;
		 * otherwise streams must be blanked around the switch
		 */
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
			// else: No-Op
		} else {
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, softMax);
			// else: No-Op
		}
	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
		if (p_state_change_support) {
			if (funcMin <= softMax)
				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
			// else: No-Op
		} else {
			/* NOTE(review): the unapply path also passes true and
			 * still compares funcMin against softMax (not maxDPM);
			 * looks asymmetric — confirm intent against HW docs.
			 */
			if (funcMin <= softMax)
				blank_and_force_memclk(dc, true, maxDPM);
			// else: No-Op
		}
	}
	dc->clk_mgr->dc_mode_softmax_enabled = enable;
}
3691 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3692                 struct dc_cursor_attributes *cursor_attr)
3693 {
3694         if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3695                 return true;
3696         return false;
3697 }
3698
3699 /* cleanup on driver unload */
3700 void dc_hardware_release(struct dc *dc)
3701 {
3702         if (dc->hwss.hardware_release)
3703                 dc->hwss.hardware_release(dc);
3704 }
3705 #endif
3706
/**
 * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
 * @dc: dc structure
 *
 * Returns: True to enable dmub notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;
#endif
	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
3725
3726 /**
3727  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
3728  *                                      Sets port index appropriately for legacy DDC
3729  * @dc: dc structure
3730  * @link_index: link index
3731  * @payload: aux payload
3732  *
3733  * Returns: True if successful, False if failure
3734  */
3735 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3736                                 uint32_t link_index,
3737                                 struct aux_payload *payload)
3738 {
3739         uint8_t action;
3740         union dmub_rb_cmd cmd = {0};
3741         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3742
3743         ASSERT(payload->length <= 16);
3744
3745         cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3746         cmd.dp_aux_access.header.payload_bytes = 0;
3747         /* For dpia, ddc_pin is set to NULL */
3748         if (!dc->links[link_index]->ddc->ddc_pin)
3749                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
3750         else
3751                 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3752
3753         cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3754         cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3755         cmd.dp_aux_access.aux_control.timeout = 0;
3756         cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3757         cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3758         cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3759
3760         /* set aux action */
3761         if (payload->i2c_over_aux) {
3762                 if (payload->write) {
3763                         if (payload->mot)
3764                                 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3765                         else
3766                                 action = DP_AUX_REQ_ACTION_I2C_WRITE;
3767                 } else {
3768                         if (payload->mot)
3769                                 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
3770                         else
3771                                 action = DP_AUX_REQ_ACTION_I2C_READ;
3772                         }
3773         } else {
3774                 if (payload->write)
3775                         action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3776                 else
3777                         action = DP_AUX_REQ_ACTION_DPCD_READ;
3778         }
3779
3780         cmd.dp_aux_access.aux_control.dpaux.action = action;
3781
3782         if (payload->length && payload->write) {
3783                 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3784                         payload->data,
3785                         payload->length
3786                         );
3787         }
3788
3789         dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3790         dc_dmub_srv_cmd_execute(dmub_srv);
3791         dc_dmub_srv_wait_idle(dmub_srv);
3792
3793         return true;
3794 }
3795
3796 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
3797                                             uint8_t dpia_port_index)
3798 {
3799         uint8_t index, link_index = 0xFF;
3800
3801         for (index = 0; index < dc->link_count; index++) {
3802                 /* ddc_hw_inst has dpia port index for dpia links
3803                  * and ddc instance for legacy links
3804                  */
3805                 if (!dc->links[index]->ddc->ddc_pin) {
3806                         if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
3807                                 link_index = index;
3808                                 break;
3809                         }
3810                 }
3811         }
3812         ASSERT(link_index != 0xFF);
3813         return link_index;
3814 }
3815
/**
 * dc_process_dmub_set_config_async - Submits set_config command to dmub via inbox message
 * @dc: dc structure
 * @link_index: link index
 * @payload: set_config command payload
 * @notify: set_config immediate reply
 *
 * Returns: True when the reply in @notify is final (the command either
 * completed instantly or failed to queue), False when completion will be
 * delivered later via an outbox notification
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
				uint32_t link_index,
				struct set_config_cmd_payload *payload,
				struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}
3865
3866 /**
3867  *****************************************************************************
3868  *  Function: dc_process_dmub_set_mst_slots
3869  *
3870  *  @brief
3871  *              Submits mst slot allocation command to dmub via inbox message
3872  *
3873  *  @param
3874  *              [in] dc: dc structure
3875  *              [in] link_index: link index
3876  *              [in] mst_alloc_slots: mst slots to be allotted
3877  *              [out] mst_slots_in_use: mst slots in use returned in failure case
3878  *
3879  *      @return
3880  *              DC_OK if successful, DC_ERROR if failure
3881  *****************************************************************************
3882  */
3883 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
3884                                 uint32_t link_index,
3885                                 uint8_t mst_alloc_slots,
3886                                 uint8_t *mst_slots_in_use)
3887 {
3888         union dmub_rb_cmd cmd = {0};
3889         struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3890
3891         /* prepare MST_ALLOC_SLOTS command */
3892         cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
3893         cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
3894
3895         cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
3896         cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
3897
3898         if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
3899                 /* command is not processed by dmub */
3900                 return DC_ERROR_UNEXPECTED;
3901
3902         /* command processed by dmub, if ret_status is 1 */
3903         if (cmd.set_config_access.header.ret_status != 1)
3904                 /* command processing error */
3905                 return DC_ERROR_UNEXPECTED;
3906
3907         /* command processed and we have a status of 2, mst not enabled in dpia */
3908         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
3909                 return DC_FAIL_UNSUPPORTED_1;
3910
3911         /* previously configured mst alloc and used slots did not match */
3912         if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
3913                 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
3914                 return DC_NOT_SUPPORTED;
3915         }
3916
3917         return DC_OK;
3918 }
3919
/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 *
 * Writes 0 to the VBIOS "accelerated mode change" scratch flag.
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}
3928
3929
3930 /**
3931  *****************************************************************************
3932  *  dc_notify_vsync_int_state() - notifies vsync enable/disable state
3933  *  @dc: dc structure
3934  *      @stream: stream where vsync int state changed
3935  *      @enable: whether vsync is enabled or disabled
3936  *
3937  *  Called when vsync is enabled/disabled
3938  *      Will notify DMUB to start/stop ABM interrupts after steady state is reached
3939  *
3940  *****************************************************************************
3941  */
3942 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
3943 {
3944         int i;
3945         int edp_num;
3946         struct pipe_ctx *pipe = NULL;
3947         struct dc_link *link = stream->sink->link;
3948         struct dc_link *edp_links[MAX_NUM_EDP];
3949
3950
3951         if (link->psr_settings.psr_feature_enabled)
3952                 return;
3953
3954         /*find primary pipe associated with stream*/
3955         for (i = 0; i < MAX_PIPES; i++) {
3956                 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3957
3958                 if (pipe->stream == stream && pipe->stream_res.tg)
3959                         break;
3960         }
3961
3962         if (i == MAX_PIPES) {
3963                 ASSERT(0);
3964                 return;
3965         }
3966
3967         get_edp_links(dc, edp_links, &edp_num);
3968
3969         /* Determine panel inst */
3970         for (i = 0; i < edp_num; i++) {
3971                 if (edp_links[i] == link)
3972                         break;
3973         }
3974
3975         if (i == edp_num) {
3976                 return;
3977         }
3978
3979         if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
3980                 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
3981 }