drm/msm/dsi: make save_state/restore_state callbacks accept msm_dsi_phy
[linux-2.6-microblaze.git] / drivers / gpu / drm / msm / dsi / phy / dsi_phy.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
4  */
5
6 #include <linux/clk-provider.h>
7 #include <linux/platform_device.h>
8
9 #include "dsi_phy.h"
10
/* Signed DIV_ROUND_UP: rounds the quotient away from zero for both
 * positive and negative numerators (plain DIV_ROUND_UP only handles
 * non-negative values).
 */
#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
13
14 static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
15                                 s32 min_result, bool even)
16 {
17         s32 v;
18
19         v = (tmax - tmin) * percent;
20         v = S_DIV_ROUND_UP(v, 100) + tmin;
21         if (even && (v & 0x1))
22                 return max_t(s32, min_result, v - 1);
23         else
24                 return max_t(s32, min_result, v);
25 }
26
/*
 * Compute timing->clk_zero from the already-calculated clk_prepare and
 * hs_rqst values.  @ui is the coeff-scaled unit interval (ns * coeff),
 * @pcnt the interpolation percentage.  Must run after clk_prepare and
 * hs_rqst are set (see the caller's ordering comment).
 */
static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		/* NOTE(review): tmax = 511 is assigned but unused here —
		 * the interpolation upper bound is 2 * tmin instead.
		 * Presumably intentional (range doubles past 8 bits);
		 * confirm against the downstream timing spreadsheet.
		 */
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust: round clk_zero up so hs_rqst + clk_prepare + clk_zero
	 * lands on an 8-multiple boundary.
	 */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
48
/*
 * D-PHY timing calculation (original/v1 register layout).
 *
 * Derives the HS and clock lane timing fields of @timing from the
 * requested bit clock and escape clock rates in @clk_req.  All
 * intermediate values are in nanoseconds scaled by @coeff (1000) to
 * keep integer precision; the pcntN values are interpolation
 * percentages between each parameter's min/max window.
 *
 * Returns 0 on success, -EINVAL if either clock rate is zero.
 */
int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
			     struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* Unit interval and LPX period, both in coeff-scaled ns */
	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	/* clk_prepare: interpolated inside the 38ns..95ns window */
	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	/* hs_rqst: LPX in UI, forced odd (even values drop by 2) */
	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	/* clk_trail: 60ns..(105ns + 12*UI - 20ns) window */
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	/* hs_prepare: (40ns + 4*UI)..(85ns + 6*UI) window */
	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	/* hs_zero: remainder of the 145ns + 10*UI budget after hs_prepare,
	 * with a floor of 24.
	 */
	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	/* hs_exit: at least 100ns */
	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	/* clk_post/clk_pre are in byte-clock (8*UI) units */
	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0,
						       false);
	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		/* Out of 6-bit range: halve and let hw double via inc_by_2 */
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = true;
	} else {
		timing->shared_timings.clk_pre =
				linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = false;
	}

	/* Turnaround parameters are fixed values */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
		timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}
143
/*
 * D-PHY timing calculation, v2 register layout (14nm-class PHYs).
 *
 * Same approach as msm_dsi_dphy_timing_calc() but most parameters are
 * expressed in byte-clock units (ui_x8 = 8 * UI), and the per-lane
 * half-byte enable / prepare-delay knobs (hb_en*, pd*) feed into the
 * min/max windows.  All math is in coeff-scaled nanoseconds.
 *
 * Returns 0 on success, -EINVAL if either clock rate is zero.
 */
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
				struct msm_dsi_phy_clk_request *clk_req)
{
	const unsigned long bit_rate = clk_req->bitclk_rate;
	const unsigned long esc_rate = clk_req->escclk_rate;
	s32 ui, ui_x8;
	s32 tmax, tmin;
	s32 pcnt0 = 50;
	s32 pcnt1 = 50;
	s32 pcnt2 = 10;
	s32 pcnt3 = 30;
	s32 pcnt4 = 10;
	s32 pcnt5 = 2;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 hb_en, hb_en_ckln, pd_ckln, pd;
	s32 val, val_ckln;
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	/* Half-byte mode disabled for both data and clock lanes */
	timing->hs_halfbyte_en = 0;
	hb_en = 0;
	timing->hs_halfbyte_en_ckln = 0;
	hb_en_ckln = 0;
	/* Prepare delays only apply at low bit rates */
	timing->hs_prep_dly_ckln = (bit_rate > 100000000) ? 0 : 3;
	pd_ckln = timing->hs_prep_dly_ckln;
	timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1;
	pd = timing->hs_prep_dly;

	val = (hb_en << 2) + (pd << 1);
	val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1);

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	ui_x8 = ui << 3;

	/* clk_prepare: 38ns..95ns window, adjusted by val_ckln */
	temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (95 * coeff - val_ckln * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);

	/* clk_zero: remainder of the 300ns budget after clk_prepare */
	temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = (tmin > 255) ? 511 : 255;
	timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	/* hs_prepare: (40ns + 4*UI)..(85ns + 6*UI), adjusted by val */
	temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8);
	tmin = max_t(s32, temp, 0);
	temp = (85 * coeff + 6 * ui - val * ui) / ui_x8;
	tmax = max_t(s32, temp, 0);
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);

	/* hs_zero: remainder of the 145ns + 10*UI budget */
	temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui;
	tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3;
	tmax = 255;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);

	tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8);
	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = (temp + 3 * ui) / ui_x8;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);

	/* hs_rqst for data lanes, in byte clocks */
	temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
	timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);

	/* hs_exit: at least 100ns */
	tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
	tmax = 255;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);

	/* hs_rqst for the clock lane */
	temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
	timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);

	temp = 60 * coeff + 52 * ui - 43 * ui;
	tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	timing->shared_timings.clk_post =
				linear_inter(tmax, tmin, pcnt2, 0, false);

	/* clk_pre: total clock-lane lead-in time in byte clocks */
	temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui;
	temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui;
	temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
				(((timing->hs_rqst_ckln << 3) + 8) * ui);
	tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
	tmax = 63;
	if (tmin > tmax) {
		/* Out of 6-bit range: halve, hw doubles via inc_by_2 */
		temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre = temp >> 1;
		timing->shared_timings.clk_pre_inc_by_2 = 1;
	} else {
		timing->shared_timings.clk_pre =
				linear_inter(tmax, tmin, pcnt2, 0, false);
		timing->shared_timings.clk_pre_inc_by_2 = 0;
	}

	/* Turnaround parameters are fixed values */
	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
	    timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
	    timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
	    timing->clk_trail, timing->clk_prepare, timing->hs_exit,
	    timing->hs_zero, timing->hs_prepare, timing->hs_trail,
	    timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
	    timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
	    timing->hs_prep_dly_ckln);

	return 0;
}
259
260 int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
261         struct msm_dsi_phy_clk_request *clk_req)
262 {
263         const unsigned long bit_rate = clk_req->bitclk_rate;
264         const unsigned long esc_rate = clk_req->escclk_rate;
265         s32 ui, ui_x8;
266         s32 tmax, tmin;
267         s32 pcnt0 = 50;
268         s32 pcnt1 = 50;
269         s32 pcnt2 = 10;
270         s32 pcnt3 = 30;
271         s32 pcnt4 = 10;
272         s32 pcnt5 = 2;
273         s32 coeff = 1000; /* Precision, should avoid overflow */
274         s32 hb_en, hb_en_ckln;
275         s32 temp;
276
277         if (!bit_rate || !esc_rate)
278                 return -EINVAL;
279
280         timing->hs_halfbyte_en = 0;
281         hb_en = 0;
282         timing->hs_halfbyte_en_ckln = 0;
283         hb_en_ckln = 0;
284
285         ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
286         ui_x8 = ui << 3;
287
288         temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
289         tmin = max_t(s32, temp, 0);
290         temp = (95 * coeff) / ui_x8;
291         tmax = max_t(s32, temp, 0);
292         timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
293
294         temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
295         tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
296         tmax = (tmin > 255) ? 511 : 255;
297         timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
298
299         tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
300         temp = 105 * coeff + 12 * ui - 20 * coeff;
301         tmax = (temp + 3 * ui) / ui_x8;
302         timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
303
304         temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
305         tmin = max_t(s32, temp, 0);
306         temp = (85 * coeff + 6 * ui) / ui_x8;
307         tmax = max_t(s32, temp, 0);
308         timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
309
310         temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
311         tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
312         tmax = 255;
313         timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
314
315         tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
316         temp = 105 * coeff + 12 * ui - 20 * coeff;
317         tmax = (temp / ui_x8) - 1;
318         timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
319
320         temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
321         timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
322
323         tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
324         tmax = 255;
325         timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
326
327         temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
328         timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
329
330         temp = 60 * coeff + 52 * ui - 43 * ui;
331         tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
332         tmax = 63;
333         timing->shared_timings.clk_post =
334                 linear_inter(tmax, tmin, pcnt2, 0, false);
335
336         temp = 8 * ui + (timing->clk_prepare << 3) * ui;
337         temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
338         temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
339                 (((timing->hs_rqst_ckln << 3) + 8) * ui);
340         tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
341         tmax = 63;
342         if (tmin > tmax) {
343                 temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
344                 timing->shared_timings.clk_pre = temp >> 1;
345                 timing->shared_timings.clk_pre_inc_by_2 = 1;
346         } else {
347                 timing->shared_timings.clk_pre =
348                         linear_inter(tmax, tmin, pcnt2, 0, false);
349                         timing->shared_timings.clk_pre_inc_by_2 = 0;
350         }
351
352         timing->ta_go = 3;
353         timing->ta_sure = 0;
354         timing->ta_get = 4;
355
356         DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
357                 timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
358                 timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
359                 timing->clk_trail, timing->clk_prepare, timing->hs_exit,
360                 timing->hs_zero, timing->hs_prepare, timing->hs_trail,
361                 timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
362                 timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
363                 timing->hs_prep_dly_ckln);
364
365         return 0;
366 }
367
368 int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
369         struct msm_dsi_phy_clk_request *clk_req)
370 {
371         const unsigned long bit_rate = clk_req->bitclk_rate;
372         const unsigned long esc_rate = clk_req->escclk_rate;
373         s32 ui, ui_x8;
374         s32 tmax, tmin;
375         s32 pcnt_clk_prep = 50;
376         s32 pcnt_clk_zero = 2;
377         s32 pcnt_clk_trail = 30;
378         s32 pcnt_hs_prep = 50;
379         s32 pcnt_hs_zero = 10;
380         s32 pcnt_hs_trail = 30;
381         s32 pcnt_hs_exit = 10;
382         s32 coeff = 1000; /* Precision, should avoid overflow */
383         s32 hb_en;
384         s32 temp;
385
386         if (!bit_rate || !esc_rate)
387                 return -EINVAL;
388
389         hb_en = 0;
390
391         ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
392         ui_x8 = ui << 3;
393
394         /* TODO: verify these calculations against latest downstream driver
395          * everything except clk_post/clk_pre uses calculations from v3 based
396          * on the downstream driver having the same calculations for v3 and v4
397          */
398
399         temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
400         tmin = max_t(s32, temp, 0);
401         temp = (95 * coeff) / ui_x8;
402         tmax = max_t(s32, temp, 0);
403         timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
404
405         temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
406         tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
407         tmax = (tmin > 255) ? 511 : 255;
408         timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
409
410         tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
411         temp = 105 * coeff + 12 * ui - 20 * coeff;
412         tmax = (temp + 3 * ui) / ui_x8;
413         timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
414
415         temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
416         tmin = max_t(s32, temp, 0);
417         temp = (85 * coeff + 6 * ui) / ui_x8;
418         tmax = max_t(s32, temp, 0);
419         timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
420
421         temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
422         tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
423         tmax = 255;
424         timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
425
426         tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
427         temp = 105 * coeff + 12 * ui - 20 * coeff;
428         tmax = (temp / ui_x8) - 1;
429         timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
430
431         temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
432         timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
433
434         tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
435         tmax = 255;
436         timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
437
438         /* recommended min
439          * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
440          */
441         temp = 60 * coeff + 52 * ui + + (timing->hs_trail + 1) * ui_x8;
442         tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
443         tmax = 255;
444         timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
445
446         /* recommended min
447          * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
448          * val2 = (16 * bit_clk_ns)
449          * final = roundup(val1/val2, 0) - 1
450          */
451         temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
452         tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
453         tmax = 255;
454         timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
455
456         DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
457                 timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
458                 timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
459                 timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
460
461         return 0;
462 }
463
464 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
465                                 u32 bit_mask)
466 {
467         int phy_id = phy->id;
468         u32 val;
469
470         if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
471                 return;
472
473         val = dsi_phy_read(phy->base + reg);
474
475         if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
476                 dsi_phy_write(phy->base + reg, val | bit_mask);
477         else
478                 dsi_phy_write(phy->base + reg, val & (~bit_mask));
479 }
480
481 static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
482 {
483         struct regulator_bulk_data *s = phy->supplies;
484         const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
485         struct device *dev = &phy->pdev->dev;
486         int num = phy->cfg->reg_cfg.num;
487         int i, ret;
488
489         for (i = 0; i < num; i++)
490                 s[i].supply = regs[i].name;
491
492         ret = devm_regulator_bulk_get(dev, num, s);
493         if (ret < 0) {
494                 if (ret != -EPROBE_DEFER) {
495                         DRM_DEV_ERROR(dev,
496                                       "%s: failed to init regulator, ret=%d\n",
497                                       __func__, ret);
498                 }
499
500                 return ret;
501         }
502
503         return 0;
504 }
505
506 static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
507 {
508         struct regulator_bulk_data *s = phy->supplies;
509         const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
510         int num = phy->cfg->reg_cfg.num;
511         int i;
512
513         DBG("");
514         for (i = num - 1; i >= 0; i--)
515                 if (regs[i].disable_load >= 0)
516                         regulator_set_load(s[i].consumer, regs[i].disable_load);
517
518         regulator_bulk_disable(num, s);
519 }
520
521 static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
522 {
523         struct regulator_bulk_data *s = phy->supplies;
524         const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
525         struct device *dev = &phy->pdev->dev;
526         int num = phy->cfg->reg_cfg.num;
527         int ret, i;
528
529         DBG("");
530         for (i = 0; i < num; i++) {
531                 if (regs[i].enable_load >= 0) {
532                         ret = regulator_set_load(s[i].consumer,
533                                                         regs[i].enable_load);
534                         if (ret < 0) {
535                                 DRM_DEV_ERROR(dev,
536                                         "regulator %d set op mode failed, %d\n",
537                                         i, ret);
538                                 goto fail;
539                         }
540                 }
541         }
542
543         ret = regulator_bulk_enable(num, s);
544         if (ret < 0) {
545                 DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
546                 goto fail;
547         }
548
549         return 0;
550
551 fail:
552         for (i--; i >= 0; i--)
553                 regulator_set_load(s[i].consumer, regs[i].disable_load);
554         return ret;
555 }
556
557 static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
558 {
559         struct device *dev = &phy->pdev->dev;
560         int ret;
561
562         pm_runtime_get_sync(dev);
563
564         ret = clk_prepare_enable(phy->ahb_clk);
565         if (ret) {
566                 DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
567                 pm_runtime_put_sync(dev);
568         }
569
570         return ret;
571 }
572
/* Counterpart of dsi_phy_enable_resource(): release the AHB clock and
 * the runtime PM reference (deferred via autosuspend).
 */
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_autosuspend(&phy->pdev->dev);
}
578
/* DT compatible -> per-generation PHY configuration; each group is
 * compiled in only when its Kconfig option is enabled.
 */
static const struct of_device_id dsi_phy_dt_match[] = {
#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_28nm_hpm_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-hpm-fam-b",
	  .data = &dsi_phy_28nm_hpm_famb_cfgs },
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_28nm_lp_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
	{ .compatible = "qcom,dsi-phy-20nm",
	  .data = &dsi_phy_20nm_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
	{ .compatible = "qcom,dsi-phy-28nm-8960",
	  .data = &dsi_phy_28nm_8960_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
	{ .compatible = "qcom,dsi-phy-14nm",
	  .data = &dsi_phy_14nm_cfgs },
	{ .compatible = "qcom,dsi-phy-14nm-660",
	  .data = &dsi_phy_14nm_660_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
	{ .compatible = "qcom,dsi-phy-10nm",
	  .data = &dsi_phy_10nm_cfgs },
	{ .compatible = "qcom,dsi-phy-10nm-8998",
	  .data = &dsi_phy_10nm_8998_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
	{ .compatible = "qcom,dsi-phy-7nm",
	  .data = &dsi_phy_7nm_cfgs },
	{ .compatible = "qcom,dsi-phy-7nm-8150",
	  .data = &dsi_phy_7nm_8150_cfgs },
#endif
	{}
};
616
617 /*
618  * Currently, we only support one SoC for each PHY type. When we have multiple
619  * SoCs for the same PHY, we can try to make the index searching a bit more
620  * clever.
621  */
622 static int dsi_phy_get_id(struct msm_dsi_phy *phy)
623 {
624         struct platform_device *pdev = phy->pdev;
625         const struct msm_dsi_phy_cfg *cfg = phy->cfg;
626         struct resource *res;
627         int i;
628
629         res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
630         if (!res)
631                 return -EINVAL;
632
633         for (i = 0; i < cfg->num_dsi_phy; i++) {
634                 if (cfg->io_start[i] == res->start)
635                         return i;
636         }
637
638         return -EINVAL;
639 }
640
/*
 * Platform probe: allocate the PHY, map its register regions, look up
 * regulators and the AHB clock, initialize the PLL and register the
 * byte/pixel clock provider.  Returns 0 or a negative errno.
 */
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, dev->of_node);
	if (!match)
		return -ENODEV;

	/* Clock provider exposes NUM_PROVIDED_CLKS hws (byte + pixel) */
	phy->provided_clocks = devm_kzalloc(dev,
			struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
			GFP_KERNEL);
	if (!phy->provided_clocks)
		return -ENOMEM;

	phy->provided_clocks->num = NUM_PROVIDED_CLKS;

	phy->cfg = match->data;
	phy->pdev = pdev;

	phy->id = dsi_phy_get_id(phy);
	if (phy->id < 0) {
		ret = phy->id;
		DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
			__func__, ret);
		goto fail;
	}

	phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
				"qcom,dsi-phy-regulator-ldo-mode");

	/* NOTE(review): msm_ioremap() failures are mapped to -ENOMEM here
	 * rather than PTR_ERR(); confirm whether the real error should be
	 * propagated instead.
	 */
	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
	if (IS_ERR(phy->base)) {
		DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	/* Optional per-generation register regions */
	if (phy->cfg->has_phy_lane) {
		phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", "DSI_PHY_LANE");
		if (IS_ERR(phy->lane_base)) {
			DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
			ret = -ENOMEM;
			goto fail;
		}
	}

	if (phy->cfg->has_phy_regulator) {
		phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
		if (IS_ERR(phy->reg_base)) {
			DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
			ret = -ENOMEM;
			goto fail;
		}
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret)
		goto fail;

	phy->ahb_clk = msm_clk_get(pdev, "iface");
	if (IS_ERR(phy->ahb_clk)) {
		DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	/* PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	if (phy->cfg->ops.pll_init) {
		ret = phy->cfg->ops.pll_init(phy);
		if (ret) {
			DRM_DEV_INFO(dev,
				"%s: pll init failed: %d, need separate pll clk driver\n",
				__func__, ret);
			goto fail;
		}
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
				     phy->provided_clocks);
	if (ret) {
		DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
		goto fail;
	}

	/* Registers only needed during init; re-enabled at PHY enable time */
	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}
747
/* Platform driver glue; registered/unregistered from the msm_drv core
 * via the helpers below.
 */
static struct platform_driver dsi_phy_platform_driver = {
	.probe      = dsi_phy_driver_probe,
	.driver     = {
		.name   = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};
755
/* Register the PHY platform driver (called from msm_drv init). */
void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}
760
/* Unregister the PHY platform driver (called from msm_drv exit). */
void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}
765
766 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
767                         struct msm_dsi_phy_clk_request *clk_req)
768 {
769         struct device *dev = &phy->pdev->dev;
770         int ret;
771
772         if (!phy || !phy->cfg->ops.enable)
773                 return -EINVAL;
774
775         ret = dsi_phy_enable_resource(phy);
776         if (ret) {
777                 DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
778                         __func__, ret);
779                 goto res_en_fail;
780         }
781
782         ret = dsi_phy_regulator_enable(phy);
783         if (ret) {
784                 DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
785                         __func__, ret);
786                 goto reg_en_fail;
787         }
788
789         ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
790         if (ret) {
791                 DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
792                 goto phy_en_fail;
793         }
794
795         /*
796          * Resetting DSI PHY silently changes its PLL registers to reset status,
797          * which will confuse clock driver and result in wrong output rate of
798          * link clocks. Restore PLL status if its PLL is being used as clock
799          * source.
800          */
801         if (phy->usecase != MSM_DSI_PHY_SLAVE) {
802                 ret = msm_dsi_phy_pll_restore_state(phy);
803                 if (ret) {
804                         DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
805                                 __func__, ret);
806                         goto pll_restor_fail;
807                 }
808         }
809
810         return 0;
811
812 pll_restor_fail:
813         if (phy->cfg->ops.disable)
814                 phy->cfg->ops.disable(phy);
815 phy_en_fail:
816         dsi_phy_regulator_disable(phy);
817 reg_en_fail:
818         dsi_phy_disable_resource(phy);
819 res_en_fail:
820         return ret;
821 }
822
823 void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
824 {
825         if (!phy || !phy->cfg->ops.disable)
826                 return;
827
828         phy->cfg->ops.disable(phy);
829
830         dsi_phy_regulator_disable(phy);
831         dsi_phy_disable_resource(phy);
832 }
833
834 void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
835                         struct msm_dsi_phy_shared_timings *shared_timings)
836 {
837         memcpy(shared_timings, &phy->timing.shared_timings,
838                sizeof(*shared_timings));
839 }
840
841 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
842                              enum msm_dsi_phy_usecase uc)
843 {
844         if (phy)
845                 phy->usecase = uc;
846 }
847
848 int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
849         struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
850 {
851         if (byte_clk_provider)
852                 *byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
853         if (pixel_clk_provider)
854                 *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
855
856         return -EINVAL;
857 }
858
859 void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
860 {
861         if (phy->cfg->ops.save_pll_state) {
862                 phy->cfg->ops.save_pll_state(phy);
863                 phy->state_saved = true;
864         }
865 }
866
867 int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
868 {
869         int ret;
870
871         if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
872                 ret = phy->cfg->ops.restore_pll_state(phy);
873                 if (ret)
874                         return ret;
875
876                 phy->state_saved = false;
877         }
878
879         return 0;
880 }