fbdev: Garbage collect fbdev scrolling acceleration, part 1 (from TODO list)
[linux-2.6-microblaze.git] / drivers / gpu / drm / msm / dsi / phy / dsi_phy_14nm.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
4  */
5
6 #include <linux/clk.h>
7 #include <linux/clk-provider.h>
8 #include <linux/delay.h>
9
10 #include "dsi_phy.h"
11 #include "dsi.xml.h"
12 #include "dsi_phy_14nm.xml.h"
13
14 #define PHY_14NM_CKLN_IDX       4
15
16 /*
17  * DSI PLL 14nm - clock diagram (eg: DSI0):
18  *
19  *         dsi0n1_postdiv_clk
20  *                         |
21  *                         |
22  *                 +----+  |  +----+
23  *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
24  *                 +----+  |  +----+
25  *                         |           dsi0n1_postdivby2_clk
26  *                         |   +----+  |
27  *                         o---| /2 |--o--|\
28  *                         |   +----+     | \   +----+
29  *                         |              |  |--| n2 |-- dsi0pll
30  *                         o--------------| /   +----+
31  *                                        |/
32  */
33
34 #define POLL_MAX_READS                  15
35 #define POLL_TIMEOUT_US                 1000
36
37 #define VCO_REF_CLK_RATE                19200000
38 #define VCO_MIN_RATE                    1300000000UL
39 #define VCO_MAX_RATE                    2600000000UL
40
/*
 * PLL configuration: the target rate and SSC inputs, plus the register
 * values derived from them by the pll_14nm_*_calc() helpers before they
 * are committed to hardware by pll_db_commit_14nm().
 */
struct dsi_pll_config {
	u64 vco_current_rate;	/* target VCO rate in Hz */

	u32 ssc_en;	/* SSC enable/disable */

	/* fixed params */
	u32 plllock_cnt;	/* selects the lock-compare duration, see pll_14nm_dec_frac_calc() */
	u32 ssc_center;		/* 1: center spread, 0: down spread */
	u32 ssc_adj_period;
	u32 ssc_spread;		/* PPM / 1000 */
	u32 ssc_freq;

	/* calculated */
	u32 dec_start;		/* integer part of vco/ref ratio */
	u32 div_frac_start;	/* 20-bit fractional part of vco/ref ratio */
	u32 ssc_period;
	u32 ssc_step_size;
	u32 plllock_cmp;
	u32 pll_vco_div_ref;
	u32 pll_vco_count;
	u32 pll_kvco_div_ref;
	u32 pll_kvco_count;
};
64
/* Post-divider/rate state preserved across dsi_14nm_pll_save/restore_state() */
struct pll_14nm_cached_state {
	unsigned long vco_rate;	/* last VCO rate, re-applied via vco_set_rate() */
	u8 n2postdiv;		/* N2 post-divider, bits 4-7 of CMN_CLK_CFG0 */
	u8 n1postdiv;		/* N1 post-divider, bits 0-3 of CMN_CLK_CFG0 */
};
70
/* Per-PHY PLL state; the embedded clk_hw is the VCO clock */
struct dsi_pll_14nm {
	struct clk_hw clk_hw;

	struct msm_dsi_phy *phy;

	/* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
	spinlock_t postdiv_lock;

	struct pll_14nm_cached_state cached_state;

	/* the other DSI's PLL, set by dsi_14nm_set_usecase() when we're master */
	struct dsi_pll_14nm *slave;
};
83
84 #define to_pll_14nm(x)  container_of(x, struct dsi_pll_14nm, clk_hw)
85
86 /*
87  * Private struct for N1/N2 post-divider clocks. These clocks are similar to
88  * the generic clk_divider class of clocks. The only difference is that it
89  * also sets the slave DSI PLL's post-dividers if in bonded DSI mode
90  */
struct dsi_pll_14nm_postdiv {
	struct clk_hw hw;

	/* divider params */
	u8 shift;	/* bit position of the divider field in CMN_CLK_CFG0 */
	u8 width;	/* field width in bits (4 for both N1 and N2) */
	u8 flags; /* same flags as used by clk_divider struct */

	/* PLL this post-divider belongs to */
	struct dsi_pll_14nm *pll;
};
101
102 #define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
103
104 /*
105  * Global list of private DSI PLL struct pointers. We need this for bonded DSI
106  * mode, where the master PLL's clk_ops needs access the slave's private data
107  */
108 static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
109
110 static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
111                                     u32 nb_tries, u32 timeout_us)
112 {
113         bool pll_locked = false;
114         void __iomem *base = pll_14nm->phy->pll_base;
115         u32 tries, val;
116
117         tries = nb_tries;
118         while (tries--) {
119                 val = dsi_phy_read(base +
120                                REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
121                 pll_locked = !!(val & BIT(5));
122
123                 if (pll_locked)
124                         break;
125
126                 udelay(timeout_us);
127         }
128
129         if (!pll_locked) {
130                 tries = nb_tries;
131                 while (tries--) {
132                         val = dsi_phy_read(base +
133                                 REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
134                         pll_locked = !!(val & BIT(0));
135
136                         if (pll_locked)
137                                 break;
138
139                         udelay(timeout_us);
140                 }
141         }
142
143         DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
144
145         return pll_locked;
146 }
147
148 static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
149 {
150         /* fixed input */
151         pconf->plllock_cnt = 1;
152
153         /*
154          * SSC is enabled by default. We might need DT props for configuring
155          * some SSC params like PPM and center/down spread etc.
156          */
157         pconf->ssc_en = 1;
158         pconf->ssc_center = 0;          /* down spread by default */
159         pconf->ssc_spread = 5;          /* PPM / 1000 */
160         pconf->ssc_freq = 31500;        /* default recommended */
161         pconf->ssc_adj_period = 37;
162 }
163
/* Round-up integer division; NOTE: evaluates @y twice, avoid side effects */
#define CEIL(x, y)              (((x) + ((y) - 1)) / (y))
165
/*
 * Derive the SSC period and 16-bit step size from the SSC inputs in @pconf.
 * The exact sequence of multiplies/divides below is kept as-is: each div
 * truncates, so reordering would change the committed register values.
 */
static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
{
	u32 period, ssc_period;
	u32 ref, rem;
	u64 step_size;

	DBG("vco=%lld ref=%d", pconf->vco_current_rate, VCO_REF_CLK_RATE);

	ssc_period = pconf->ssc_freq / 500;
	period = (u32)VCO_REF_CLK_RATE / 1000;
	ssc_period  = CEIL(period, ssc_period);
	ssc_period -= 1;
	pconf->ssc_period = ssc_period;

	DBG("ssc freq=%d spread=%d period=%d", pconf->ssc_freq,
	    pconf->ssc_spread, pconf->ssc_period);

	/* vco rate fits in 32 bits: VCO_MAX_RATE is 2.6 GHz */
	step_size = (u32)pconf->vco_current_rate;
	ref = VCO_REF_CLK_RATE;
	ref /= 1000;
	step_size = div_u64(step_size, ref);
	step_size <<= 20;	/* 20-bit fixed-point fraction */
	step_size = div_u64(step_size, 1000);
	step_size *= pconf->ssc_spread;
	step_size = div_u64(step_size, 1000);
	step_size *= (pconf->ssc_adj_period + 1);

	/* spread the step over the period, rounding up */
	rem = 0;
	step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
	if (rem)
		step_size++;

	DBG("step_size=%lld", step_size);

	step_size &= 0x0ffff;	/* take lower 16 bits */

	pconf->ssc_step_size = step_size;
}
204
205 static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
206 {
207         u64 multiplier = BIT(20);
208         u64 dec_start_multiple, dec_start, pll_comp_val;
209         u32 duration, div_frac_start;
210         u64 vco_clk_rate = pconf->vco_current_rate;
211         u64 fref = VCO_REF_CLK_RATE;
212
213         DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
214
215         dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
216         div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
217
218         dec_start = div_u64(dec_start_multiple, multiplier);
219
220         pconf->dec_start = (u32)dec_start;
221         pconf->div_frac_start = div_frac_start;
222
223         if (pconf->plllock_cnt == 0)
224                 duration = 1024;
225         else if (pconf->plllock_cnt == 1)
226                 duration = 256;
227         else if (pconf->plllock_cnt == 2)
228                 duration = 128;
229         else
230                 duration = 32;
231
232         pll_comp_val = duration * dec_start_multiple;
233         pll_comp_val = div_u64(pll_comp_val, multiplier);
234         do_div(pll_comp_val, 10);
235
236         pconf->plllock_cmp = (u32)pll_comp_val;
237 }
238
239 static u32 pll_14nm_kvco_slop(u32 vrate)
240 {
241         u32 slop = 0;
242
243         if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
244                 slop =  600;
245         else if (vrate > 1800000000UL && vrate < 2300000000UL)
246                 slop = 400;
247         else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
248                 slop = 280;
249
250         return slop;
251 }
252
/*
 * Compute the VCO/KVCO measurement counts and reference dividers used by
 * the PLL's lock state machine.  do_div() ordering is load-bearing: each
 * step truncates before the next.
 */
static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
{
	u64 vco_clk_rate = pconf->vco_current_rate;
	u64 fref = VCO_REF_CLK_RATE;
	u32 vco_measure_time = 5;
	u32 kvco_measure_time = 5;
	u64 data;
	u32 cnt;

	/* ref cycles per measurement window, clipped to 10 bits, minus 2 */
	data = fref * vco_measure_time;
	do_div(data, 1000000);
	data &= 0x03ff;	/* 10 bits */
	data -= 2;
	pconf->pll_vco_div_ref = data;

	data = div_u64(vco_clk_rate, 1000000);	/* unit is Mhz */
	data *= vco_measure_time;
	do_div(data, 10);
	pconf->pll_vco_count = data;

	/* same window computation for KVCO, but minus 1 */
	data = fref * kvco_measure_time;
	do_div(data, 1000000);
	data &= 0x03ff;	/* 10 bits */
	data -= 1;
	pconf->pll_kvco_div_ref = data;

	/* kvco_count = slop * 2 / 100 * measure_time */
	cnt = pll_14nm_kvco_slop(vco_clk_rate);
	cnt *= 2;
	cnt /= 100;
	cnt *= kvco_measure_time;
	pconf->pll_kvco_count = cnt;
}
285
286 static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
287 {
288         void __iomem *base = pll->phy->pll_base;
289         u8 data;
290
291         data = pconf->ssc_adj_period;
292         data &= 0x0ff;
293         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
294         data = (pconf->ssc_adj_period >> 8);
295         data &= 0x03;
296         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
297
298         data = pconf->ssc_period;
299         data &= 0x0ff;
300         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
301         data = (pconf->ssc_period >> 8);
302         data &= 0x0ff;
303         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
304
305         data = pconf->ssc_step_size;
306         data &= 0x0ff;
307         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
308         data = (pconf->ssc_step_size >> 8);
309         data &= 0x0ff;
310         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
311
312         data = (pconf->ssc_center & 0x01);
313         data <<= 1;
314         data |= 0x01; /* enable */
315         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
316
317         wmb();  /* make sure register committed */
318 }
319
/* Program the rate-independent PLL registers (trims, timers, VCO/KVCO refs) */
static void pll_db_commit_common(struct dsi_pll_14nm *pll,
				 struct dsi_pll_config *pconf)
{
	void __iomem *base = pll->phy->pll_base;
	u8 data;

	/* configure the non frequency dependent pll registers */
	data = 0;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, 1);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, 48);
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, 4 << 3); /* bandgap_timer */
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, 5); /* pll_wakeup_timer */

	/* 10-bit VCO reference divider, split low byte / high 2 bits */
	data = pconf->pll_vco_div_ref & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
	data = (pconf->pll_vco_div_ref >> 8) & 0x3;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);

	/* 10-bit KVCO reference divider, same split */
	data = pconf->pll_kvco_div_ref & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
	data = (pconf->pll_kvco_div_ref >> 8) & 0x3;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);

	/* fixed analog trim/charge-pump settings below */
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, 16);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, 4);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, 4);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, 1 << 3 | 1);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, 0 << 3 | 0);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, 0 << 3 | 0);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, 4 << 3 | 4);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, 1 << 4 | 11);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, 7);

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, 1 << 4 | 2);
}
366
/* Stop the PLL and pulse its software reset via the common PHY registers */
static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
{
	void __iomem *cmn_base = pll_14nm->phy->base;

	/* de assert pll start and apply pll sw reset */

	/* stop pll */
	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);

	/* pll sw reset: assert, hold 10us, then de-assert */
	dsi_phy_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
	wmb();	/* make sure register committed */

	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
	wmb();	/* make sure register committed */
}
383
/*
 * Commit the full configuration in @pconf to the PLL hardware: common
 * (rate-independent) registers, a software reset, then the frequency
 * dependent registers and (optionally) SSC.
 */
static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
			       struct dsi_pll_config *pconf)
{
	void __iomem *base = pll->phy->pll_base;
	void __iomem *cmn_base = pll->phy->base;
	u8 data;

	DBG("DSI%d PLL", pll->phy->id);

	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, 0x3c);

	pll_db_commit_common(pll, pconf);

	pll_14nm_software_reset(pll);

	/* Use the /2 path in Mux */
	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, 1);

	data = 0xff; /* data, clk, pll normal operation */
	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);

	/* configure the frequency dependent pll registers */
	data = pconf->dec_start;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);

	/* 20-bit fractional part, split across three registers */
	data = pconf->div_frac_start & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
	data = (pconf->div_frac_start >> 8) & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
	data = (pconf->div_frac_start >> 16) & 0xf;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);

	/* 18-bit lock-compare value, split across three registers */
	data = pconf->plllock_cmp & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);

	data = (pconf->plllock_cmp >> 8) & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);

	data = (pconf->plllock_cmp >> 16) & 0x3;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);

	data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);

	data = pconf->pll_vco_count & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
	data = (pconf->pll_vco_count >> 8) & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);

	data = pconf->pll_kvco_count & 0xff;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
	data = (pconf->pll_kvco_count >> 8) & 0x3;
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);

	/*
	 * High nibble configures the post divider internal to the VCO. It's
	 * fixed to divide by 1 for now.
	 *
	 * 0: divided by 1
	 * 1: divided by 2
	 * 2: divided by 4
	 * 3: divided by 8
	 */
	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, 0 << 4 | 3);

	if (pconf->ssc_en)
		pll_db_commit_ssc(pll, pconf);

	wmb();	/* make sure register committed */
}
454
455 /*
456  * VCO clock Callbacks
457  */
/*
 * clk_ops.set_rate for the VCO: derive all register values for @rate and
 * commit them to hardware (and to the slave PLL in bonded mode).
 * Always returns 0.
 */
static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
	struct dsi_pll_config conf;

	DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->phy->id, rate,
	    parent_rate);

	dsi_pll_14nm_config_init(&conf);
	conf.vco_current_rate = rate;

	pll_14nm_dec_frac_calc(pll_14nm, &conf);

	if (conf.ssc_en)
		pll_14nm_ssc_calc(pll_14nm, &conf);

	pll_14nm_calc_vco_count(pll_14nm, &conf);

	/* commit the slave DSI PLL registers if we're master. Note that we
	 * don't lock the slave PLL. We just ensure that the PLL/PHY registers
	 * of the master and slave are identical
	 */
	if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
		/* NOTE(review): ->slave is set in dsi_14nm_set_usecase() and is
		 * not NULL-checked here — confirm set_usecase always runs first
		 */
		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;

		pll_db_commit_14nm(pll_14nm_slave, &conf);
	}

	pll_db_commit_14nm(pll_14nm, &conf);

	return 0;
}
491
492 static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
493                                                   unsigned long parent_rate)
494 {
495         struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
496         void __iomem *base = pll_14nm->phy->pll_base;
497         u64 vco_rate, multiplier = BIT(20);
498         u32 div_frac_start;
499         u32 dec_start;
500         u64 ref_clk = parent_rate;
501
502         dec_start = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
503         dec_start &= 0x0ff;
504
505         DBG("dec_start = %x", dec_start);
506
507         div_frac_start = (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
508                                 & 0xf) << 16;
509         div_frac_start |= (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
510                                 & 0xff) << 8;
511         div_frac_start |= dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
512                                 & 0xff;
513
514         DBG("div_frac_start = %x", div_frac_start);
515
516         vco_rate = ref_clk * dec_start;
517
518         vco_rate += ((ref_clk * div_frac_start) / multiplier);
519
520         /*
521          * Recalculating the rate from dec_start and frac_start doesn't end up
522          * the rate we originally set. Convert the freq to KHz, round it up and
523          * convert it back to MHz.
524          */
525         vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
526
527         DBG("returning vco rate = %lu", (unsigned long)vco_rate);
528
529         return (unsigned long)vco_rate;
530 }
531
532 static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
533 {
534         struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
535         void __iomem *base = pll_14nm->phy->pll_base;
536         void __iomem *cmn_base = pll_14nm->phy->base;
537         bool locked;
538
539         DBG("");
540
541         if (unlikely(pll_14nm->phy->pll_on))
542                 return 0;
543
544         dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
545         dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
546
547         locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
548                                          POLL_TIMEOUT_US);
549
550         if (unlikely(!locked)) {
551                 DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, "DSI PLL lock failed\n");
552                 return -EINVAL;
553         }
554
555         DBG("DSI PLL lock success");
556         pll_14nm->phy->pll_on = true;
557
558         return 0;
559 }
560
561 static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
562 {
563         struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
564         void __iomem *cmn_base = pll_14nm->phy->base;
565
566         DBG("");
567
568         if (unlikely(!pll_14nm->phy->pll_on))
569                 return;
570
571         dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
572
573         pll_14nm->phy->pll_on = false;
574 }
575
576 static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
577                 unsigned long rate, unsigned long *parent_rate)
578 {
579         struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
580
581         if      (rate < pll_14nm->phy->cfg->min_pll_rate)
582                 return  pll_14nm->phy->cfg->min_pll_rate;
583         else if (rate > pll_14nm->phy->cfg->max_pll_rate)
584                 return  pll_14nm->phy->cfg->max_pll_rate;
585         else
586                 return rate;
587 }
588
/* clk_ops for the PLL VCO clock */
static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
	.round_rate = dsi_pll_14nm_clk_round_rate,
	.set_rate = dsi_pll_14nm_vco_set_rate,
	.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
	.prepare = dsi_pll_14nm_vco_prepare,
	.unprepare = dsi_pll_14nm_vco_unprepare,
};
596
597 /*
598  * N1 and N2 post-divider clock callbacks
599  */
600 #define div_mask(width) ((1 << (width)) - 1)
601 static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
602                                                       unsigned long parent_rate)
603 {
604         struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
605         struct dsi_pll_14nm *pll_14nm = postdiv->pll;
606         void __iomem *base = pll_14nm->phy->base;
607         u8 shift = postdiv->shift;
608         u8 width = postdiv->width;
609         u32 val;
610
611         DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate);
612
613         val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
614         val &= div_mask(width);
615
616         return divider_recalc_rate(hw, parent_rate, val, NULL,
617                                    postdiv->flags, width);
618 }
619
620 static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
621                                             unsigned long rate,
622                                             unsigned long *prate)
623 {
624         struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
625         struct dsi_pll_14nm *pll_14nm = postdiv->pll;
626
627         DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
628
629         return divider_round_rate(hw, rate, prate, NULL,
630                                   postdiv->width,
631                                   postdiv->flags);
632 }
633
/*
 * clk_ops.set_rate for an N1/N2 post-divider: program the divider field in
 * CMN_CLK_CFG0 under postdiv_lock, mirroring to the slave PHY in bonded
 * mode.  Always returns 0.
 */
static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate)
{
	struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
	struct dsi_pll_14nm *pll_14nm = postdiv->pll;
	void __iomem *base = pll_14nm->phy->base;
	spinlock_t *lock = &pll_14nm->postdiv_lock;
	u8 shift = postdiv->shift;
	u8 width = postdiv->width;
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->phy->id, rate,
	    parent_rate);

	value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
				postdiv->flags);

	spin_lock_irqsave(lock, flags);

	/* read-modify-write only this divider's field */
	val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
	val &= ~(div_mask(width) << shift);

	val |= value << shift;
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);

	/* If we're master in bonded DSI mode, then the slave PLL's post-dividers
	 * follow the master's post dividers
	 */
	if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
		void __iomem *slave_base = pll_14nm_slave->phy->base;

		dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
	}

	spin_unlock_irqrestore(lock, flags);

	return 0;
}
675
/* clk_ops shared by the N1 and N2 post-divider clocks */
static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
	.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
	.round_rate = dsi_pll_14nm_postdiv_round_rate,
	.set_rate = dsi_pll_14nm_postdiv_set_rate,
};
681
682 /*
683  * PLL Callbacks
684  */
685
686 static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy)
687 {
688         struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
689         struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
690         void __iomem *cmn_base = pll_14nm->phy->base;
691         u32 data;
692
693         data = dsi_phy_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
694
695         cached_state->n1postdiv = data & 0xf;
696         cached_state->n2postdiv = (data >> 4) & 0xf;
697
698         DBG("DSI%d PLL save state %x %x", pll_14nm->phy->id,
699             cached_state->n1postdiv, cached_state->n2postdiv);
700
701         cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
702 }
703
/*
 * Re-apply the state captured by dsi_14nm_pll_save_state(): first the VCO
 * rate, then the N1/N2 post-dividers (mirrored to the slave in bonded mode).
 * Returns 0 on success or the error from the rate restore.
 */
static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
{
	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
	struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
	void __iomem *cmn_base = pll_14nm->phy->base;
	u32 data;
	int ret;

	ret = dsi_pll_14nm_vco_set_rate(phy->vco_hw,
					cached_state->vco_rate, 0);
	if (ret) {
		DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
			"restore vco rate failed. ret=%d\n", ret);
		return ret;
	}

	/* repack N1 into bits 0-3 and N2 into bits 4-7 of CLK_CFG0 */
	data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);

	DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id,
	    cached_state->n1postdiv, cached_state->n2postdiv);

	dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);

	/* also restore post-dividers for slave DSI PLL */
	if (phy->usecase == MSM_DSI_PHY_MASTER) {
		struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
		void __iomem *slave_base = pll_14nm_slave->phy->base;

		dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
	}

	return 0;
}
737
/*
 * Configure the PLL for standalone, master, or slave operation.  In master
 * mode the other PHY's PLL (from pll_14nm_list) becomes our slave.
 * Returns 0, or -EINVAL for an unknown usecase.
 */
static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
{
	struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
	void __iomem *base = phy->pll_base;
	u32 clkbuflr_en, bandgap = 0;

	switch (phy->usecase) {
	case MSM_DSI_PHY_STANDALONE:
		clkbuflr_en = 0x1;
		break;
	case MSM_DSI_PHY_MASTER:
		clkbuflr_en = 0x3;
		/* the "other" DSI's PLL is our slave */
		pll_14nm->slave = pll_14nm_list[(pll_14nm->phy->id + 1) % DSI_MAX];
		break;
	case MSM_DSI_PHY_SLAVE:
		clkbuflr_en = 0x0;
		bandgap = 0x3;
		break;
	default:
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
	/* bandgap is only programmed in slave mode (the one case setting it) */
	if (bandgap)
		dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);

	return 0;
}
766
/*
 * Register one N1/N2 post-divider clock for @pll_14nm.
 * @shift selects the divider's bit position in CMN_CLK_CFG0 (0 for N1,
 * 4 for N2).  Returns the registered clk_hw or an ERR_PTR.
 */
static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
						const char *name,
						const char *parent_name,
						unsigned long flags,
						u8 shift)
{
	struct dsi_pll_14nm_postdiv *pll_postdiv;
	struct device *dev = &pll_14nm->phy->pdev->dev;
	struct clk_init_data postdiv_init = {
		.parent_names = (const char *[]) { parent_name },
		.num_parents = 1,
		.name = name,
		.flags = flags,
		.ops = &clk_ops_dsi_pll_14nm_postdiv,
	};
	int ret;

	pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
	if (!pll_postdiv)
		return ERR_PTR(-ENOMEM);

	pll_postdiv->pll = pll_14nm;
	pll_postdiv->shift = shift;
	/* both N1 and N2 postdividers are 4 bits wide */
	pll_postdiv->width = 4;
	/* range of each divider is from 1 to 15 */
	pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
	pll_postdiv->hw.init = &postdiv_init;

	ret = devm_clk_hw_register(dev, &pll_postdiv->hw);
	if (ret)
		return ERR_PTR(ret);

	return &pll_postdiv->hw;
}
802
/*
 * Register the clock tree for this PLL (see the diagram at the top of the
 * file): VCO -> N1 postdiv -> (/8 byte clock, /2 -> N2 pixel clock).
 * The byte and pixel clocks are placed in @provided_clocks.
 * Returns 0 on success or a negative errno.
 */
static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
{
	char clk_name[32], parent[32], vco_name[32];
	struct clk_init_data vco_init = {
		.parent_names = (const char *[]){ "xo" },
		.num_parents = 1,
		.name = vco_name,
		.flags = CLK_IGNORE_UNUSED,
		.ops = &clk_ops_dsi_pll_14nm_vco,
	};
	struct device *dev = &pll_14nm->phy->pdev->dev;
	struct clk_hw *hw;
	int ret;

	DBG("DSI%d", pll_14nm->phy->id);

	snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id);
	pll_14nm->clk_hw.init = &vco_init;

	ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
	if (ret)
		return ret;

	snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
	snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id);

	/* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
				       CLK_SET_RATE_PARENT, 0);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id);
	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);

	/* DSI Byte clock = VCO_CLK / N1 / 8 */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
					  CLK_SET_RATE_PARENT, 1, 8);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	provided_clocks[DSI_BYTE_PLL_CLK] = hw;

	snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
	snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);

	/*
	 * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider
	 * on the way. Don't let it set parent.
	 */
	hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id);
	snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);

	/* DSI pixel clock = VCO_CLK / N1 / 2 / N2
	 * This is the output of N2 post-divider, bits 4-7 in
	 * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
	 */
	hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	provided_clocks[DSI_PIXEL_PLL_CLK]	= hw;

	return 0;
}
872
873 static int dsi_pll_14nm_init(struct msm_dsi_phy *phy)
874 {
875         struct platform_device *pdev = phy->pdev;
876         struct dsi_pll_14nm *pll_14nm;
877         int ret;
878
879         if (!pdev)
880                 return -ENODEV;
881
882         pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
883         if (!pll_14nm)
884                 return -ENOMEM;
885
886         DBG("PLL%d", phy->id);
887
888         pll_14nm_list[phy->id] = pll_14nm;
889
890         spin_lock_init(&pll_14nm->postdiv_lock);
891
892         pll_14nm->phy = phy;
893
894         ret = pll_14nm_register(pll_14nm, phy->provided_clocks->hws);
895         if (ret) {
896                 DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
897                 return ret;
898         }
899
900         phy->vco_hw = &pll_14nm->clk_hw;
901
902         return 0;
903 }
904
/*
 * Program the per-lane D-PHY timing registers for one lane.
 *
 * @phy:      the PHY whose lane register block (phy->lane_base) is written
 * @timing:   timing values previously computed by
 *            msm_dsi_dphy_timing_calc_v2()
 * @lane_idx: 0-3 for the data lanes, PHY_14NM_CKLN_IDX (4) for the clock
 *            lane, which uses the clk_*-/ *_ckln variants of each timing
 *            parameter
 */
static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
				     struct msm_dsi_dphy_timing *timing,
				     int lane_idx)
{
	void __iomem *base = phy->lane_base;
	/* The clock lane takes its own set of timing parameters. */
	bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX);
	u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero;
	u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare;
	u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail;
	u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst;
	u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly;
	u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
				   timing->hs_halfbyte_en;

	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
		      DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
		      halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
	/* Bus-turnaround timing: TA_GO/TA_SURE/TA_GET. */
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		      DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
	/* NOTE(review): 0xa0 looks like a fixed recommended value — confirm. */
	dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
		      DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
}
941
/*
 * Bring up the 14nm D-PHY: compute timings from the clock request, program
 * the common and per-lane blocks, reset the digital block, select the bit
 * clock source (local vs. master PHY for dual-DSI), configure the PLL
 * usecase, and finally release the power-down on PLL and lanes.
 *
 * The register-write order and the wmb()/udelay() barriers are part of the
 * hardware bring-up sequence — do not reorder.
 *
 * Returns 0 on success, -EINVAL if timing calculation fails, or the error
 * from dsi_14nm_set_usecase().
 */
static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
			       struct msm_dsi_phy_clk_request *clk_req)
{
	struct msm_dsi_dphy_timing *timing = &phy->timing;
	u32 data;
	int i;
	int ret;
	void __iomem *base = phy->base;
	void __iomem *lane_base = phy->lane_base;
	u32 glbl_test_ctrl;

	if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
		DRM_DEV_ERROR(&phy->pdev->dev,
			"%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	/* LDO control; dual-DSI usecases additionally set VREG_CTRL. */
	data = 0x1c;
	if (phy->usecase != MSM_DSI_PHY_STANDALONE)
		data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);

	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);

	/* 4 data lanes + 1 clk lane configuration */
	for (i = 0; i < 5; i++) {
		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
			      0x1d);

		/* Drive strength; the clock lane uses its own value. */
		dsi_phy_write(lane_base +
			      REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
		dsi_phy_write(lane_base +
			      REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
			      (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);

		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
			      (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
		/* Disable the lane test datapath. */
		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
			      0);
		dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
			      0x88);

		dsi_14nm_dphy_set_timing(phy, timing, i);
	}

	/* Make sure PLL is not start */
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);

	wmb(); /* make sure everything is written before reset and enable */

	/* reset digital block */
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
	wmb(); /* ensure reset is asserted */
	udelay(100);
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);

	/*
	 * Dual-DSI: the slave PHY (DSI_1) takes its HS bit clock from the
	 * master; otherwise select the local bit clock.
	 */
	glbl_test_ctrl = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
	if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
		glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	else
		glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, glbl_test_ctrl);
	ret = dsi_14nm_set_usecase(phy);
	if (ret) {
		DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
			__func__, ret);
		return ret;
	}

	/* Remove power down from PLL and all lanes */
	dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);

	return 0;
}
1017
1018 static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
1019 {
1020         dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
1021         dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
1022
1023         /* ensure that the phy is completely disabled */
1024         wmb();
1025 }
1026
/* 14nm D-PHY configuration (two PHY instances at 0x994400/0x996400). */
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		/* vcca supply; numbers are regulator load values — TODO confirm units */
		.regs = {
			{"vcca", 17000, 32},
		},
	},
	.ops = {
		.enable = dsi_14nm_phy_enable,
		.disable = dsi_14nm_phy_disable,
		.pll_init = dsi_pll_14nm_init,
		.save_pll_state = dsi_14nm_pll_save_state,
		.restore_pll_state = dsi_14nm_pll_restore_state,
	},
	/* VCO operating range: 1.3 GHz - 2.6 GHz (see VCO_MIN/MAX_RATE). */
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0x994400, 0x996400 },
	.num_dsi_phy = 2,
};
1047
/*
 * 14nm D-PHY configuration variant (presumably SDM660, per the _660 suffix):
 * same ops and VCO range, different regulator load and MMIO base addresses.
 */
const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
	.has_phy_lane = true,
	.reg_cfg = {
		.num = 1,
		/* vcca supply; numbers are regulator load values — TODO confirm units */
		.regs = {
			{"vcca", 73400, 32},
		},
	},
	.ops = {
		.enable = dsi_14nm_phy_enable,
		.disable = dsi_14nm_phy_disable,
		.pll_init = dsi_pll_14nm_init,
		.save_pll_state = dsi_14nm_pll_save_state,
		.restore_pll_state = dsi_14nm_pll_restore_state,
	},
	/* VCO operating range: 1.3 GHz - 2.6 GHz (see VCO_MIN/MAX_RATE). */
	.min_pll_rate = VCO_MIN_RATE,
	.max_pll_rate = VCO_MAX_RATE,
	.io_start = { 0xc994400, 0xc996000 },
	.num_dsi_phy = 2,
};