drm/msm/dpu: remove display H_TILE from encoder
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)     "[drm:%s] " fmt, __func__
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"

#define RESERVED_BY_OTHER(h, r) \
        ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))

#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
                                (t).num_comp_enc == (r).num_enc && \
                                (t).num_intf == (r).num_intf)

struct dpu_rm_topology_def {
        enum dpu_rm_topology_name top_name;
        int num_lm;
        int num_comp_enc;
        int num_intf;
        int num_ctl;
        int needs_split_display;
};

static const struct dpu_rm_topology_def g_top_table[] = {
        {   DPU_RM_TOPOLOGY_NONE,                 0, 0, 0, 0, false },
        {   DPU_RM_TOPOLOGY_SINGLEPIPE,           1, 0, 1, 1, false },
        {   DPU_RM_TOPOLOGY_DUALPIPE,             2, 0, 2, 2, true  },
        {   DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,     2, 0, 1, 1, false },
};
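
/*
 * Illustrative note: RM_IS_TOPOLOGY_MATCH() maps an incoming
 * struct msm_display_topology onto one of the rows above. For example,
 * a request of { .num_lm = 2, .num_enc = 0, .num_intf = 2 } selects
 * DPU_RM_TOPOLOGY_DUALPIPE, which in turn requires two CTL paths with
 * split-display capability (see _dpu_rm_reserve_ctls()).
 */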

/**
 * struct dpu_rm_requirements - Reservation requirements parameter bundle
 * @topology:  selected topology for the display
 * @hw_res:    Hardware resources required as reported by the encoders
 */
struct dpu_rm_requirements {
        const struct dpu_rm_topology_def *topology;
        struct dpu_encoder_hw_resources hw_res;
};

/**
 * struct dpu_rm_rsvp - Use Case Reservation tagging structure
 *      Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain.
 *      By using a tag, rather than lists of pointers to HW blocks used,
 *      we can avoid some list management, since we don't know how many blocks
 *      of each type a given use case may require.
 * @list:       List head for list of all reservations
 * @seq:        Global RSVP sequence number for debugging, especially for
 *              differentiating different allocations for the same encoder.
 * @enc_id:     Reservations are tracked by Encoder DRM object ID.
 *              CRTCs may be connected to multiple Encoders.
 *              An encoder or connector id identifies the display path.
 * @topology:   DRM<->HW topology use case
 */
struct dpu_rm_rsvp {
        struct list_head list;
        uint32_t seq;
        uint32_t enc_id;
        enum dpu_rm_topology_name topology;
};

/**
 * struct dpu_rm_hw_blk - hardware block tracking list member
 * @list:       List head for list of all hardware blocks tracking items
 * @rsvp:       Pointer to use case reservation if reserved by a client
 * @rsvp_nxt:   Temporary pointer used during reservation to the incoming
 *              request. Will be swapped into rsvp if proposal is accepted
 * @type:       Type of hardware block this structure tracks
 * @id:         Hardware ID number, within its own space, ie. LM_X
 * @hw:         Pointer to the hardware register access object for this block
 */
struct dpu_rm_hw_blk {
        struct list_head list;
        struct dpu_rm_rsvp *rsvp;
        struct dpu_rm_rsvp *rsvp_nxt;
        enum dpu_hw_blk_type type;
        uint32_t id;
        struct dpu_hw_blk *hw;
};

/**
 * enum dpu_rm_dbg_rsvp_stage - steps in making reservation for event logging
 */
enum dpu_rm_dbg_rsvp_stage {
        DPU_RM_STAGE_BEGIN,
        DPU_RM_STAGE_AFTER_CLEAR,
        DPU_RM_STAGE_AFTER_RSVPNEXT,
        DPU_RM_STAGE_FINAL
};

static void _dpu_rm_print_rsvps(
                struct dpu_rm *rm,
                enum dpu_rm_dbg_rsvp_stage stage)
{
        struct dpu_rm_rsvp *rsvp;
        struct dpu_rm_hw_blk *blk;
        enum dpu_hw_blk_type type;

        DPU_DEBUG("%d\n", stage);

        list_for_each_entry(rsvp, &rm->rsvps, list) {
                DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
                              rsvp->enc_id, rsvp->topology);
        }

        for (type = 0; type < DPU_HW_BLK_MAX; type++) {
                list_for_each_entry(blk, &rm->hw_blks[type], list) {
                        if (!blk->rsvp && !blk->rsvp_nxt)
                                continue;

                        DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
                                (blk->rsvp) ? blk->rsvp->seq : 0,
                                (blk->rsvp) ? blk->rsvp->enc_id : 0,
                                (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
                                (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
                                blk->type, blk->id);
                }
        }
}

struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
{
        return rm->hw_mdp;
}

enum dpu_rm_topology_name
dpu_rm_get_topology_name(struct msm_display_topology topology)
{
        int i;

        for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
                if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
                        return g_top_table[i].top_name;

        return DPU_RM_TOPOLOGY_NONE;
}

void dpu_rm_init_hw_iter(
                struct dpu_rm_hw_iter *iter,
                uint32_t enc_id,
                enum dpu_hw_blk_type type)
{
        memset(iter, 0, sizeof(*iter));
        iter->enc_id = enc_id;
        iter->type = type;
}

static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
        struct list_head *blk_list;

        if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
                DPU_ERROR("invalid rm\n");
                return false;
        }

        i->hw = NULL;
        blk_list = &rm->hw_blks[i->type];

        if (i->blk && (&i->blk->list == blk_list)) {
                DPU_DEBUG("attempt resume iteration past last\n");
                return false;
        }

        i->blk = list_prepare_entry(i->blk, blk_list, list);

        list_for_each_entry_continue(i->blk, blk_list, list) {
                struct dpu_rm_rsvp *rsvp = i->blk->rsvp;

                if (i->blk->type != i->type) {
                        DPU_ERROR("found incorrect block type %d on %d list\n",
                                        i->blk->type, i->type);
                        return false;
                }

                if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
                        i->hw = i->blk->hw;
                        DPU_DEBUG("found type %d id %d for enc %d\n",
                                        i->type, i->blk->id, i->enc_id);
                        return true;
                }
        }

        DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);

        return false;
}

bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
{
        bool ret;

        mutex_lock(&rm->rm_lock);
        ret = _dpu_rm_get_hw_locked(rm, i);
        mutex_unlock(&rm->rm_lock);

        return ret;
}

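/*
 * Minimal usage sketch for the iterator API above (illustrative; the
 * real callers live in dpu_encoder.c). Walks every CTL block tagged
 * for a given encoder id:
 *
 *      struct dpu_rm_hw_iter iter;
 *
 *      dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_CTL);
 *      while (dpu_rm_get_hw(rm, &iter)) {
 *              struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.hw);
 *
 *              ... program ctl ...
 *      }
 *
 * Passing enc_id == 0 instead iterates all blocks of the given type,
 * reserved or not.
 */
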
static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
{
        switch (type) {
        case DPU_HW_BLK_LM:
                dpu_hw_lm_destroy(hw);
                break;
        case DPU_HW_BLK_CTL:
                dpu_hw_ctl_destroy(hw);
                break;
        case DPU_HW_BLK_PINGPONG:
                dpu_hw_pingpong_destroy(hw);
                break;
        case DPU_HW_BLK_INTF:
                dpu_hw_intf_destroy(hw);
                break;
        case DPU_HW_BLK_SSPP:
                /* SSPPs are not managed by the resource manager */
        case DPU_HW_BLK_TOP:
                /* Top is a singleton, not managed in hw_blks list */
        case DPU_HW_BLK_MAX:
        default:
                DPU_ERROR("unsupported block type %d\n", type);
                break;
        }
}

int dpu_rm_destroy(struct dpu_rm *rm)
{
        struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
        struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
        enum dpu_hw_blk_type type;

        if (!rm) {
                DPU_ERROR("invalid rm\n");
                return -EINVAL;
        }

        list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
                list_del(&rsvp_cur->list);
                kfree(rsvp_cur);
        }

        for (type = 0; type < DPU_HW_BLK_MAX; type++) {
                list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
                                list) {
                        list_del(&hw_cur->list);
                        _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
                        kfree(hw_cur);
                }
        }

        dpu_hw_mdp_destroy(rm->hw_mdp);
        rm->hw_mdp = NULL;

        mutex_destroy(&rm->rm_lock);

        return 0;
}

static int _dpu_rm_hw_blk_create(
                struct dpu_rm *rm,
                struct dpu_mdss_cfg *cat,
                void __iomem *mmio,
                enum dpu_hw_blk_type type,
                uint32_t id,
                void *hw_catalog_info)
{
        struct dpu_rm_hw_blk *blk;
        void *hw;

        switch (type) {
        case DPU_HW_BLK_LM:
                hw = dpu_hw_lm_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_CTL:
                hw = dpu_hw_ctl_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_PINGPONG:
                hw = dpu_hw_pingpong_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_INTF:
                hw = dpu_hw_intf_init(id, mmio, cat);
                break;
        case DPU_HW_BLK_SSPP:
                /* SSPPs are not managed by the resource manager */
        case DPU_HW_BLK_TOP:
                /* Top is a singleton, not managed in hw_blks list */
        case DPU_HW_BLK_MAX:
        default:
                DPU_ERROR("unsupported block type %d\n", type);
                return -EINVAL;
        }

        if (IS_ERR_OR_NULL(hw)) {
                DPU_ERROR("failed hw object creation: type %d, err %ld\n",
                                type, PTR_ERR(hw));
                return -EFAULT;
        }

        blk = kzalloc(sizeof(*blk), GFP_KERNEL);
        if (!blk) {
                _dpu_rm_hw_destroy(type, hw);
                return -ENOMEM;
        }

        blk->type = type;
        blk->id = id;
        blk->hw = hw;
        list_add_tail(&blk->list, &rm->hw_blks[type]);

        return 0;
}

int dpu_rm_init(struct dpu_rm *rm,
                struct dpu_mdss_cfg *cat,
                void __iomem *mmio,
                struct drm_device *dev)
{
        int rc, i;
        enum dpu_hw_blk_type type;

        if (!rm || !cat || !mmio || !dev) {
                DPU_ERROR("invalid kms\n");
                return -EINVAL;
        }

        /* Clear, setup lists */
        memset(rm, 0, sizeof(*rm));

        mutex_init(&rm->rm_lock);

        INIT_LIST_HEAD(&rm->rsvps);
        for (type = 0; type < DPU_HW_BLK_MAX; type++)
                INIT_LIST_HEAD(&rm->hw_blks[type]);

        rm->dev = dev;

        /* Some of the sub-blocks require an mdptop to be created */
        rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
        if (IS_ERR_OR_NULL(rm->hw_mdp)) {
                rc = PTR_ERR(rm->hw_mdp);
                rm->hw_mdp = NULL;
                DPU_ERROR("failed: mdp hw not available\n");
                goto fail;
        }

        /* Interrogate HW catalog and create tracking items for hw blocks */
        for (i = 0; i < cat->mixer_count; i++) {
                struct dpu_lm_cfg *lm = &cat->mixer[i];

                if (lm->pingpong == PINGPONG_MAX) {
                        DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
                        continue;
                }

                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
                                cat->mixer[i].id, &cat->mixer[i]);
                if (rc) {
                        DPU_ERROR("failed: lm hw not available\n");
                        goto fail;
                }

                if (!rm->lm_max_width) {
                        rm->lm_max_width = lm->sblk->maxwidth;
                } else if (rm->lm_max_width != lm->sblk->maxwidth) {
                        /*
                         * Don't expect to have hw where lm max widths differ.
                         * If found, take the min.
                         */
                        DPU_ERROR("unsupported: lm maxwidth differs\n");
                        if (rm->lm_max_width > lm->sblk->maxwidth)
                                rm->lm_max_width = lm->sblk->maxwidth;
                }
        }

        for (i = 0; i < cat->pingpong_count; i++) {
                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
                                cat->pingpong[i].id, &cat->pingpong[i]);
                if (rc) {
                        DPU_ERROR("failed: pp hw not available\n");
                        goto fail;
                }
        }

        for (i = 0; i < cat->intf_count; i++) {
                if (cat->intf[i].type == INTF_NONE) {
                        DPU_DEBUG("skip intf %d with type none\n", i);
                        continue;
                }

                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
                                cat->intf[i].id, &cat->intf[i]);
                if (rc) {
                        DPU_ERROR("failed: intf hw not available\n");
                        goto fail;
                }
        }

        for (i = 0; i < cat->ctl_count; i++) {
                rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
                                cat->ctl[i].id, &cat->ctl[i]);
                if (rc) {
                        DPU_ERROR("failed: ctl hw not available\n");
                        goto fail;
                }
        }

        return 0;

fail:
        dpu_rm_destroy(rm);

        return rc;
}

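/*
 * Illustrative call site (a sketch of how the KMS driver brings the RM
 * up at probe; see dpu_kms.c for the authoritative sequence):
 *
 *      rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
 *                       dpu_kms->dev);
 *      if (rc)
 *              goto error;
 *
 * dpu_rm_destroy() undoes this on teardown.
 */
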
/**
 * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer
 *      meets the proposed use case requirements, incl. hardwired dependent
 *      blocks like pingpong
 * @rm: dpu resource manager handle
 * @rsvp: reservation currently being created
 * @reqs: proposed use case requirements
 * @lm: proposed layer mixer, function checks if lm, and all other hardwired
 *      blocks connected to the lm (pp), are available and appropriate
 * @pp: output parameter, pingpong block attached to the layer mixer.
 *      NULL if pp was not available, or not matching requirements.
 * @primary_lm: if non-null, this function checks if lm is a compatible peer
 *              of primary_lm, as well as satisfying all other requirements
 * @Return: true if lm matches all requirements, false otherwise
 */
static bool _dpu_rm_check_lm_and_get_connected_blks(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                struct dpu_rm_requirements *reqs,
                struct dpu_rm_hw_blk *lm,
                struct dpu_rm_hw_blk **pp,
                struct dpu_rm_hw_blk *primary_lm)
{
        const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
        struct dpu_rm_hw_iter iter;

        *pp = NULL;

        DPU_DEBUG("check lm %d pp %d\n",
                           lm_cfg->id, lm_cfg->pingpong);

        /* Check if this layer mixer is a peer of the proposed primary LM */
        if (primary_lm) {
                const struct dpu_lm_cfg *prim_lm_cfg =
                                to_dpu_hw_mixer(primary_lm->hw)->cap;

                if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
                        DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
                                        prim_lm_cfg->id);
                        return false;
                }
        }

        /* Already reserved? */
        if (RESERVED_BY_OTHER(lm, rsvp)) {
                DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
                return false;
        }

        dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                if (iter.blk->id == lm_cfg->pingpong) {
                        *pp = iter.blk;
                        break;
                }
        }

        if (!*pp) {
                DPU_ERROR("failed to get pp %d on lm %d\n", lm_cfg->pingpong,
                                lm_cfg->id);
                return false;
        }

        if (RESERVED_BY_OTHER(*pp, rsvp)) {
                DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
                                (*pp)->id);
                return false;
        }

        return true;
}

static int _dpu_rm_reserve_lms(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                struct dpu_rm_requirements *reqs)
{
        struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
        struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
        struct dpu_rm_hw_iter iter_i, iter_j;
        int lm_count = 0;
        int i, rc = 0;

        if (!reqs->topology->num_lm) {
                DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
                return -EINVAL;
        }

        /* Find a primary mixer */
        dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
        while (lm_count != reqs->topology->num_lm &&
                        _dpu_rm_get_hw_locked(rm, &iter_i)) {
                memset(&lm, 0, sizeof(lm));
                memset(&pp, 0, sizeof(pp));

                lm_count = 0;
                lm[lm_count] = iter_i.blk;

                if (!_dpu_rm_check_lm_and_get_connected_blks(
                                rm, rsvp, reqs, lm[lm_count],
                                &pp[lm_count], NULL))
                        continue;

                ++lm_count;

                /* Valid primary mixer found, find matching peers */
                dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);

                while (lm_count != reqs->topology->num_lm &&
                                _dpu_rm_get_hw_locked(rm, &iter_j)) {
                        if (iter_i.blk == iter_j.blk)
                                continue;

                        if (!_dpu_rm_check_lm_and_get_connected_blks(
                                        rm, rsvp, reqs, iter_j.blk,
                                        &pp[lm_count], iter_i.blk))
                                continue;

                        lm[lm_count] = iter_j.blk;
                        ++lm_count;
                }
        }

        if (lm_count != reqs->topology->num_lm) {
                DPU_DEBUG("unable to find appropriate mixers\n");
                return -ENAVAIL;
        }

        for (i = 0; i < ARRAY_SIZE(lm); i++) {
                if (!lm[i])
                        break;

                lm[i]->rsvp_nxt = rsvp;
                pp[i]->rsvp_nxt = rsvp;

                trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
                                         pp[i]->id);
        }

        return rc;
}

static int _dpu_rm_reserve_ctls(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                const struct dpu_rm_topology_def *top)
{
        struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
        struct dpu_rm_hw_iter iter;
        int i = 0;

        memset(&ctls, 0, sizeof(ctls));

        dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
                unsigned long features = ctl->caps->features;
                bool has_split_display;

                if (RESERVED_BY_OTHER(iter.blk, rsvp))
                        continue;

                has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;

                DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);

                if (top->needs_split_display != has_split_display)
                        continue;

                ctls[i] = iter.blk;
                DPU_DEBUG("ctl %d match\n", iter.blk->id);

                if (++i == top->num_ctl)
                        break;
        }

        if (i != top->num_ctl)
                return -ENAVAIL;

        for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
                ctls[i]->rsvp_nxt = rsvp;
                trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
                                          rsvp->enc_id);
        }

        return 0;
}

static int _dpu_rm_reserve_intf(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                uint32_t id,
                enum dpu_hw_blk_type type)
{
        struct dpu_rm_hw_iter iter;
        int ret = 0;

        /* Find the block entry in the rm, and note the reservation */
        dpu_rm_init_hw_iter(&iter, 0, type);
        while (_dpu_rm_get_hw_locked(rm, &iter)) {
                if (iter.blk->id != id)
                        continue;

                if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
                        DPU_ERROR("type %d id %d already reserved\n", type, id);
                        return -ENAVAIL;
                }

                iter.blk->rsvp_nxt = rsvp;
                trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
                                          rsvp->enc_id);
                break;
        }

        /* Shouldn't happen since intfs are fixed at probe */
        if (!iter.hw) {
                DPU_ERROR("couldn't find type %d id %d\n", type, id);
                return -EINVAL;
        }

        return ret;
}

static int _dpu_rm_reserve_intf_related_hw(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                struct dpu_encoder_hw_resources *hw_res)
{
        int i, ret = 0;
        u32 id;

        for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
                if (hw_res->intfs[i] == INTF_MODE_NONE)
                        continue;
                id = i + INTF_0;
                ret = _dpu_rm_reserve_intf(rm, rsvp, id,
                                DPU_HW_BLK_INTF);
                if (ret)
                        return ret;
        }

        return ret;
}

static int _dpu_rm_make_next_rsvp(
                struct dpu_rm *rm,
                struct drm_encoder *enc,
                struct drm_crtc_state *crtc_state,
                struct drm_connector_state *conn_state,
                struct dpu_rm_rsvp *rsvp,
                struct dpu_rm_requirements *reqs)
{
        int ret;
        struct dpu_rm_topology_def topology;

        /* Create reservation info, tag reserved blocks with it as we go */
        rsvp->seq = ++rm->rsvp_next_seq;
        rsvp->enc_id = enc->base.id;
        rsvp->topology = reqs->topology->top_name;
        list_add_tail(&rsvp->list, &rm->rsvps);

        ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
        if (ret) {
                DPU_ERROR("unable to find appropriate mixers\n");
                return ret;
        }

        /*
         * Do assignment preferring to give away low-resource CTLs first:
         * - Check CTLs without split display capability
         * - Only then allow to grab from CTLs with split display capability
         */
        ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
        if (ret && !reqs->topology->needs_split_display) {
                memcpy(&topology, reqs->topology, sizeof(topology));
                topology.needs_split_display = true;
                ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
        }
        if (ret) {
                DPU_ERROR("unable to find appropriate CTL\n");
                return ret;
        }

        ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
        if (ret)
                return ret;

        return ret;
}

static int _dpu_rm_populate_requirements(
                struct dpu_rm *rm,
                struct drm_encoder *enc,
                struct drm_crtc_state *crtc_state,
                struct drm_connector_state *conn_state,
                struct dpu_rm_requirements *reqs,
                struct msm_display_topology req_topology)
{
        int i;

        memset(reqs, 0, sizeof(*reqs));

        dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);

        for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
                if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
                                        req_topology)) {
                        reqs->topology = &g_top_table[i];
                        break;
                }
        }

        if (!reqs->topology) {
                DPU_ERROR("invalid topology for the display\n");
                return -EINVAL;
        }

        DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
                      reqs->topology->num_lm, reqs->topology->num_ctl,
                      reqs->topology->top_name,
                      reqs->topology->needs_split_display);

        return 0;
}

static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
                struct dpu_rm *rm,
                struct drm_encoder *enc)
{
        struct dpu_rm_rsvp *i;

        if (!rm || !enc) {
                DPU_ERROR("invalid params\n");
                return NULL;
        }

        if (list_empty(&rm->rsvps))
                return NULL;

        list_for_each_entry(i, &rm->rsvps, list)
                if (i->enc_id == enc->base.id)
                        return i;

        return NULL;
}

static struct drm_connector *_dpu_rm_get_connector(
                struct drm_encoder *enc)
{
        struct drm_connector *conn = NULL;
        struct list_head *connector_list =
                        &enc->dev->mode_config.connector_list;

        list_for_each_entry(conn, connector_list, head)
                if (conn->encoder == enc)
                        return conn;

        return NULL;
}

/**
 * _dpu_rm_release_rsvp - release resources and free a reservation
 * @rm:         KMS handle
 * @rsvp:       RSVP pointer to release resources for and free
 * @conn:       connector for the display path (currently unused)
 */
static void _dpu_rm_release_rsvp(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                struct drm_connector *conn)
{
        struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
        struct dpu_rm_hw_blk *blk;
        enum dpu_hw_blk_type type;

        if (!rsvp)
                return;

        DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);

        list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
                if (rsvp == rsvp_c) {
                        list_del(&rsvp_c->list);
                        break;
                }
        }

        for (type = 0; type < DPU_HW_BLK_MAX; type++) {
                list_for_each_entry(blk, &rm->hw_blks[type], list) {
                        if (blk->rsvp == rsvp) {
                                blk->rsvp = NULL;
                                DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
                                                rsvp->seq, rsvp->enc_id,
                                                blk->type, blk->id);
                        }
                        if (blk->rsvp_nxt == rsvp) {
                                blk->rsvp_nxt = NULL;
                                DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
                                                rsvp->seq, rsvp->enc_id,
                                                blk->type, blk->id);
                        }
                }
        }

        kfree(rsvp);
}

void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
        struct dpu_rm_rsvp *rsvp;
        struct drm_connector *conn;

        if (!rm || !enc) {
                DPU_ERROR("invalid params\n");
                return;
        }

        mutex_lock(&rm->rm_lock);

        rsvp = _dpu_rm_get_rsvp(rm, enc);
        if (!rsvp) {
                DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
                goto end;
        }

        conn = _dpu_rm_get_connector(enc);
        if (!conn) {
                DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
                goto end;
        }

        _dpu_rm_release_rsvp(rm, rsvp, conn);
end:
        mutex_unlock(&rm->rm_lock);
}

static int _dpu_rm_commit_rsvp(
                struct dpu_rm *rm,
                struct dpu_rm_rsvp *rsvp,
                struct drm_connector_state *conn_state)
{
        struct dpu_rm_hw_blk *blk;
        enum dpu_hw_blk_type type;
        int ret = 0;

        /* Swap next rsvp to be the active one */
        for (type = 0; type < DPU_HW_BLK_MAX; type++) {
                list_for_each_entry(blk, &rm->hw_blks[type], list) {
                        if (blk->rsvp_nxt) {
                                blk->rsvp = blk->rsvp_nxt;
                                blk->rsvp_nxt = NULL;
                        }
                }
        }

        if (!ret)
                DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
                              rsvp->topology);

        return ret;
}

int dpu_rm_reserve(
                struct dpu_rm *rm,
                struct drm_encoder *enc,
                struct drm_crtc_state *crtc_state,
                struct drm_connector_state *conn_state,
                struct msm_display_topology topology,
                bool test_only)
{
        struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
        struct dpu_rm_requirements reqs;
        int ret;

        if (!rm || !enc || !crtc_state || !conn_state) {
                DPU_ERROR("invalid arguments\n");
                return -EINVAL;
        }

        /* Check if this is just a page-flip */
        if (!drm_atomic_crtc_needs_modeset(crtc_state))
                return 0;

        DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
                      conn_state->connector->base.id, enc->base.id,
                      crtc_state->crtc->base.id, test_only);

        mutex_lock(&rm->rm_lock);

        _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);

        ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
                        conn_state, &reqs, topology);
        if (ret) {
                DPU_ERROR("failed to populate hw requirements\n");
                goto end;
        }

        /*
         * We only support one active reservation per-hw-block. But to implement
         * transactional semantics for test-only, and for allowing failure while
         * modifying your existing reservation, over the course of this
         * function we can have two reservations:
         * Current: Existing reservation
         * Next: Proposed reservation. The proposed reservation may fail, or may
         *       be discarded if in test-only mode.
         * If reservation is successful, and we're not in test-only, then we
         * replace the current with the next.
         */
        rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
        if (!rsvp_nxt) {
                ret = -ENOMEM;
                goto end;
        }

        rsvp_cur = _dpu_rm_get_rsvp(rm, enc);

        /* Check the proposed reservation, store it in hw's "next" field */
        ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
                        rsvp_nxt, &reqs);

        _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);

        if (ret) {
                DPU_ERROR("failed to reserve hw resources: %d\n", ret);
                _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
        } else if (test_only) {
                /*
                 * In test_only mode, the reservation is only validated;
                 * undo it here so atomic_check leaves no state behind.
                 */
                DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
                                rsvp_nxt->seq, rsvp_nxt->enc_id);
                _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
        } else {
                _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);

                ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
        }

        _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);

end:
        mutex_unlock(&rm->rm_lock);

        return ret;
}
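
/*
 * Reservation lifecycle, as exercised by the atomic paths (illustrative
 * sketch; the exact hook names depend on the encoder implementation):
 *
 *      atomic_check:   dpu_rm_reserve(rm, enc, ..., test_only = true)
 *                      validates the request, then drops the trial rsvp
 *      atomic_commit:  dpu_rm_reserve(rm, enc, ..., test_only = false)
 *                      replaces any current rsvp with the new one
 *      teardown:       dpu_rm_release(rm, enc)
 *                      untags all blocks and frees the active rsvp
 */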