drm/i915: migrate pll enable/disable code to intel_dpll.[ch]
[linux-2.6-microblaze.git] drivers/gpu/drm/i915/display/intel_pps.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5
6 #include "i915_drv.h"
7 #include "intel_display_types.h"
8 #include "intel_dp.h"
9 #include "intel_dpll.h"
10 #include "intel_pps.h"
11
12 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
13                                       enum pipe pipe);
14
15 static void pps_init_delays(struct intel_dp *intel_dp);
16 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
17
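/*
 * Grab a display core power reference and then the PPS mutex, in that
 * order, so pps_pipe can be used safely. Pair with intel_pps_unlock(),
 * which drops them in the reverse order.
 */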
18 intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
19 {
20         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
21         intel_wakeref_t wakeref;
22
23         /*
24          * See intel_pps_reset_all() for why we need a power domain reference here.
25          */
26         wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
27         mutex_lock(&dev_priv->pps_mutex);
28
29         return wakeref;
30 }
31
32 intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
33                                  intel_wakeref_t wakeref)
34 {
35         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
36
37         mutex_unlock(&dev_priv->pps_mutex);
38         intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
39
40         return 0;
41 }
42
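/*
 * Briefly drive the port with a minimal one-lane configuration so that
 * the selected power sequencer latches onto it. The pipe's DPLL is
 * forced on temporarily if it isn't running already.
 */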
43 static void
44 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
45 {
46         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
47         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
48         enum pipe pipe = intel_dp->pps.pps_pipe;
49         bool pll_enabled, release_cl_override = false;
50         enum dpio_phy phy = DPIO_PHY(pipe);
51         enum dpio_channel ch = vlv_pipe_to_channel(pipe);
52         u32 DP;
53
54         if (drm_WARN(&dev_priv->drm,
55                      intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
56                      "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
57                      pipe_name(pipe), dig_port->base.base.base.id,
58                      dig_port->base.base.name))
59                 return;
60
61         drm_dbg_kms(&dev_priv->drm,
62                     "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
63                     pipe_name(pipe), dig_port->base.base.base.id,
64                     dig_port->base.base.name);
65
66         /* Preserve the BIOS-computed detected bit. This is
67          * supposed to be read-only.
68          */
69         DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
70         DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
71         DP |= DP_PORT_WIDTH(1);
72         DP |= DP_LINK_TRAIN_PAT_1;
73
74         if (IS_CHERRYVIEW(dev_priv))
75                 DP |= DP_PIPE_SEL_CHV(pipe);
76         else
77                 DP |= DP_PIPE_SEL(pipe);
78
79         pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
80
81         /*
82          * The DPLL for the pipe must be enabled for this to work.
83          * So enable it temporarily if it's not already enabled.
84          */
85         if (!pll_enabled) {
86                 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
87                         !chv_phy_powergate_ch(dev_priv, phy, ch, true);
88
89                 if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
90                         drm_err(&dev_priv->drm,
91                                 "Failed to force on pll for pipe %c!\n",
92                                 pipe_name(pipe));
93                         return;
94                 }
95         }
96
97         /*
98          * Similar magic as in intel_dp_enable_port().
99          * We _must_ do this port enable + disable trick
100          * to make this power sequencer lock onto the port.
101          * Otherwise even the VDD force bit won't work.
102          */
103         intel_de_write(dev_priv, intel_dp->output_reg, DP);
104         intel_de_posting_read(dev_priv, intel_dp->output_reg);
105
106         intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
107         intel_de_posting_read(dev_priv, intel_dp->output_reg);
108
109         intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
110         intel_de_posting_read(dev_priv, intel_dp->output_reg);
111
112         if (!pll_enabled) {
113                 vlv_force_pll_off(dev_priv, pipe);
114
115                 if (release_cl_override)
116                         chv_phy_powergate_ch(dev_priv, phy, ch, false);
117         }
118 }
119
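/*
 * Return a pipe (A or B) whose power sequencer isn't claimed by any
 * DP/eDP port, or INVALID_PIPE if both are in use.
 */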
120 static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
121 {
122         struct intel_encoder *encoder;
123         unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
124
125         /*
126          * We don't have a power sequencer currently.
127          * Pick one that's not used by other ports.
128          */
129         for_each_intel_dp(&dev_priv->drm, encoder) {
130                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
131
132                 if (encoder->type == INTEL_OUTPUT_EDP) {
133                         drm_WARN_ON(&dev_priv->drm,
134                                     intel_dp->pps.active_pipe != INVALID_PIPE &&
135                                     intel_dp->pps.active_pipe !=
136                                     intel_dp->pps.pps_pipe);
137
138                         if (intel_dp->pps.pps_pipe != INVALID_PIPE)
139                                 pipes &= ~(1 << intel_dp->pps.pps_pipe);
140                 } else {
141                         drm_WARN_ON(&dev_priv->drm,
142                                     intel_dp->pps.pps_pipe != INVALID_PIPE);
143
144                         if (intel_dp->pps.active_pipe != INVALID_PIPE)
145                                 pipes &= ~(1 << intel_dp->pps.active_pipe);
146                 }
147         }
148
149         if (pipes == 0)
150                 return INVALID_PIPE;
151
152         return ffs(pipes) - 1;
153 }
154
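/*
 * Return the pipe whose power sequencer is assigned to this eDP port,
 * picking, initializing and kicking a free one on first use.
 */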
155 static enum pipe
156 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
157 {
158         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
159         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
160         enum pipe pipe;
161
162         lockdep_assert_held(&dev_priv->pps_mutex);
163
164         /* We should never land here with regular DP ports */
165         drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
166
167         drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
168                     intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
169
170         if (intel_dp->pps.pps_pipe != INVALID_PIPE)
171                 return intel_dp->pps.pps_pipe;
172
173         pipe = vlv_find_free_pps(dev_priv);
174
175         /*
176          * Didn't find one. This should not happen since there
177          * are two power sequencers and up to two eDP ports.
178          */
179         if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
180                 pipe = PIPE_A;
181
182         vlv_steal_power_sequencer(dev_priv, pipe);
183         intel_dp->pps.pps_pipe = pipe;
184
185         drm_dbg_kms(&dev_priv->drm,
186                     "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
187                     pipe_name(intel_dp->pps.pps_pipe),
188                     dig_port->base.base.base.id,
189                     dig_port->base.base.name);
190
191         /* init power sequencer on this pipe and port */
192         pps_init_delays(intel_dp);
193         pps_init_registers(intel_dp, true);
194
195         /*
196          * Even vdd force doesn't work until we've made
197          * the power sequencer lock onto the port.
198          */
199         vlv_power_sequencer_kick(intel_dp);
200
201         return intel_dp->pps.pps_pipe;
202 }
203
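/*
 * On GEN9 LP the power sequencer index follows the VBT backlight
 * controller; reprogram the PPS registers if a reset was flagged.
 */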
204 static int
205 bxt_power_sequencer_idx(struct intel_dp *intel_dp)
206 {
207         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
208         int backlight_controller = dev_priv->vbt.backlight.controller;
209
210         lockdep_assert_held(&dev_priv->pps_mutex);
211
212         /* We should never land here with regular DP ports */
213         drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
214
215         if (!intel_dp->pps.pps_reset)
216                 return backlight_controller;
217
218         intel_dp->pps.pps_reset = false;
219
220         /*
221          * Only the HW needs to be reprogrammed, the SW state is fixed and
222          * has been setup during connector init.
223          */
224         pps_init_registers(intel_dp, false);
225
226         return backlight_controller;
227 }
228
229 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
230                                enum pipe pipe);
231
232 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
233                                enum pipe pipe)
234 {
235         return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
236 }
237
238 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
239                                 enum pipe pipe)
240 {
241         return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
242 }
243
244 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
245                          enum pipe pipe)
246 {
247         return true;
248 }
249
250 static enum pipe
251 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
252                      enum port port,
253                      vlv_pipe_check pipe_check)
254 {
255         enum pipe pipe;
256
257         for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
258                 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
259                         PANEL_PORT_SELECT_MASK;
260
261                 if (port_sel != PANEL_PORT_SELECT_VLV(port))
262                         continue;
263
264                 if (!pipe_check(dev_priv, pipe))
265                         continue;
266
267                 return pipe;
268         }
269
270         return INVALID_PIPE;
271 }
272
273 static void
274 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
275 {
276         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
277         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
278         enum port port = dig_port->base.port;
279
280         lockdep_assert_held(&dev_priv->pps_mutex);
281
282         /* try to find a pipe with this port selected */
283         /* first pick one where the panel is on */
284         intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
285                                                       vlv_pipe_has_pp_on);
286         /* didn't find one? pick one where vdd is on */
287         if (intel_dp->pps.pps_pipe == INVALID_PIPE)
288                 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
289                                                               vlv_pipe_has_vdd_on);
290         /* didn't find one? pick one with just the correct port */
291         if (intel_dp->pps.pps_pipe == INVALID_PIPE)
292                 intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
293                                                               vlv_pipe_any);
294
295         /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
296         if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
297                 drm_dbg_kms(&dev_priv->drm,
298                             "no initial power sequencer for [ENCODER:%d:%s]\n",
299                             dig_port->base.base.base.id,
300                             dig_port->base.base.name);
301                 return;
302         }
303
304         drm_dbg_kms(&dev_priv->drm,
305                     "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
306                     dig_port->base.base.base.id,
307                     dig_port->base.base.name,
308                     pipe_name(intel_dp->pps.pps_pipe));
309 }
310
311 void intel_pps_reset_all(struct drm_i915_private *dev_priv)
312 {
313         struct intel_encoder *encoder;
314
315         if (drm_WARN_ON(&dev_priv->drm,
316                         !(IS_VALLEYVIEW(dev_priv) ||
317                           IS_CHERRYVIEW(dev_priv) ||
318                           IS_GEN9_LP(dev_priv))))
319                 return;
320
321         /*
322          * We can't grab pps_mutex here due to deadlock with power_domain
323          * mutex when power_domain functions are called while holding pps_mutex.
324          * That also means that in order to use pps_pipe the code needs to
325          * hold both a power domain reference and pps_mutex, and the power domain
326          * reference get/put must be done while _not_ holding pps_mutex.
327          * pps_{lock,unlock}() do these steps in the correct order, so one
328          * should use them always.
329          */
330
331         for_each_intel_dp(&dev_priv->drm, encoder) {
332                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
333
334                 drm_WARN_ON(&dev_priv->drm,
335                             intel_dp->pps.active_pipe != INVALID_PIPE);
336
337                 if (encoder->type != INTEL_OUTPUT_EDP)
338                         continue;
339
340                 if (IS_GEN9_LP(dev_priv))
341                         intel_dp->pps.pps_reset = true;
342                 else
343                         intel_dp->pps.pps_pipe = INVALID_PIPE;
344         }
345 }
346
347 struct pps_registers {
348         i915_reg_t pp_ctrl;
349         i915_reg_t pp_stat;
350         i915_reg_t pp_on;
351         i915_reg_t pp_off;
352         i915_reg_t pp_div;
353 };
354
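/*
 * Look up the PPS register offsets for the sequencer instance used by
 * this port. pp_div is left invalid on platforms where the power cycle
 * delay lives in PP_CONTROL instead of PP_DIVISOR.
 */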
355 static void intel_pps_get_registers(struct intel_dp *intel_dp,
356                                     struct pps_registers *regs)
357 {
358         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
359         int pps_idx = 0;
360
361         memset(regs, 0, sizeof(*regs));
362
363         if (IS_GEN9_LP(dev_priv))
364                 pps_idx = bxt_power_sequencer_idx(intel_dp);
365         else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
366                 pps_idx = vlv_power_sequencer_pipe(intel_dp);
367
368         regs->pp_ctrl = PP_CONTROL(pps_idx);
369         regs->pp_stat = PP_STATUS(pps_idx);
370         regs->pp_on = PP_ON_DELAYS(pps_idx);
371         regs->pp_off = PP_OFF_DELAYS(pps_idx);
372
373         /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
374         if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
375                 regs->pp_div = INVALID_MMIO_REG;
376         else
377                 regs->pp_div = PP_DIVISOR(pps_idx);
378 }
379
380 static i915_reg_t
381 _pp_ctrl_reg(struct intel_dp *intel_dp)
382 {
383         struct pps_registers regs;
384
385         intel_pps_get_registers(intel_dp, &regs);
386
387         return regs.pp_ctrl;
388 }
389
390 static i915_reg_t
391 _pp_stat_reg(struct intel_dp *intel_dp)
392 {
393         struct pps_registers regs;
394
395         intel_pps_get_registers(intel_dp, &regs);
396
397         return regs.pp_stat;
398 }
399
400 static bool edp_have_panel_power(struct intel_dp *intel_dp)
401 {
402         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
403
404         lockdep_assert_held(&dev_priv->pps_mutex);
405
406         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
407             intel_dp->pps.pps_pipe == INVALID_PIPE)
408                 return false;
409
410         return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
411 }
412
413 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
414 {
415         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
416
417         lockdep_assert_held(&dev_priv->pps_mutex);
418
419         if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
420             intel_dp->pps.pps_pipe == INVALID_PIPE)
421                 return false;
422
423         return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
424 }
425
426 void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
427 {
428         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
429
430         if (!intel_dp_is_edp(intel_dp))
431                 return;
432
433         if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
434                 drm_WARN(&dev_priv->drm, 1,
435                          "eDP powered off while attempting aux channel communication.\n");
436                 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
437                             intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
438                             intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
439         }
440 }
441
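/*
 * PP_STATUS mask/value pairs describing the idle states we wait for:
 * panel fully on, panel fully off, and power cycle delay complete.
 */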
442 #define IDLE_ON_MASK            (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
443 #define IDLE_ON_VALUE           (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
444
445 #define IDLE_OFF_MASK           (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
446 #define IDLE_OFF_VALUE          (0     | PP_SEQUENCE_NONE | 0                     | 0)
447
448 #define IDLE_CYCLE_MASK         (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
449 #define IDLE_CYCLE_VALUE        (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
450
451 static void intel_pps_verify_state(struct intel_dp *intel_dp);
452
453 static void wait_panel_status(struct intel_dp *intel_dp,
454                                        u32 mask,
455                                        u32 value)
456 {
457         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
458         i915_reg_t pp_stat_reg, pp_ctrl_reg;
459
460         lockdep_assert_held(&dev_priv->pps_mutex);
461
462         intel_pps_verify_state(intel_dp);
463
464         pp_stat_reg = _pp_stat_reg(intel_dp);
465         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
466
467         drm_dbg_kms(&dev_priv->drm,
468                     "mask %08x value %08x status %08x control %08x\n",
469                     mask, value,
470                     intel_de_read(dev_priv, pp_stat_reg),
471                     intel_de_read(dev_priv, pp_ctrl_reg));
472
473         if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
474                                        mask, value, 5000))
475                 drm_err(&dev_priv->drm,
476                         "Panel status timeout: status %08x control %08x\n",
477                         intel_de_read(dev_priv, pp_stat_reg),
478                         intel_de_read(dev_priv, pp_ctrl_reg));
479
480         drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
481 }
482
483 static void wait_panel_on(struct intel_dp *intel_dp)
484 {
485         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
486
487         drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
488         wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
489 }
490
491 static void wait_panel_off(struct intel_dp *intel_dp)
492 {
493         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
494
495         drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
496         wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
497 }
498
499 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
500 {
501         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
502         ktime_t panel_power_on_time;
503         s64 panel_power_off_duration;
504
505         drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
506
507         /* take the difference of the current time and the panel power off time
508          * and then make the panel wait for t11_t12 if needed. */
509         panel_power_on_time = ktime_get_boottime();
510         panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->pps.panel_power_off_time);
511
512         /* When we disable the VDD override bit last, we have to do the manual
513          * wait. */
514         if (panel_power_off_duration < (s64)intel_dp->pps.panel_power_cycle_delay)
515                 wait_remaining_ms_from_jiffies(jiffies,
516                                        intel_dp->pps.panel_power_cycle_delay - panel_power_off_duration);
517
518         wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
519 }
520
521 void intel_pps_wait_power_cycle(struct intel_dp *intel_dp)
522 {
523         intel_wakeref_t wakeref;
524
525         if (!intel_dp_is_edp(intel_dp))
526                 return;
527
528         with_intel_pps_lock(intel_dp, wakeref)
529                 wait_panel_power_cycle(intel_dp);
530 }
531
532 static void wait_backlight_on(struct intel_dp *intel_dp)
533 {
534         wait_remaining_ms_from_jiffies(intel_dp->pps.last_power_on,
535                                        intel_dp->pps.backlight_on_delay);
536 }
537
538 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
539 {
540         wait_remaining_ms_from_jiffies(intel_dp->pps.last_backlight_off,
541                                        intel_dp->pps.backlight_off_delay);
542 }
543
544 /* Read the current pp_control value, unlocking the register if it
545  * is locked
546  */
547
548 static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
549 {
550         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
551         u32 control;
552
553         lockdep_assert_held(&dev_priv->pps_mutex);
554
555         control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
556         if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
557                         (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
558                 control &= ~PANEL_UNLOCK_MASK;
559                 control |= PANEL_UNLOCK_REGS;
560         }
561         return control;
562 }
563
564 /*
565  * Must be paired with intel_pps_vdd_off_unlocked().
566  * Must hold pps_mutex around the whole on/off sequence.
567  * Can be nested with intel_pps_vdd_{on,off}() calls.
568  */
569 bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
570 {
571         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
572         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
573         u32 pp;
574         i915_reg_t pp_stat_reg, pp_ctrl_reg;
575         bool need_to_disable = !intel_dp->pps.want_panel_vdd;
576
577         lockdep_assert_held(&dev_priv->pps_mutex);
578
579         if (!intel_dp_is_edp(intel_dp))
580                 return false;
581
582         cancel_delayed_work(&intel_dp->pps.panel_vdd_work);
583         intel_dp->pps.want_panel_vdd = true;
584
585         if (edp_have_panel_vdd(intel_dp))
586                 return need_to_disable;
587
588         drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
589         intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
590                                                             intel_aux_power_domain(dig_port));
591
592         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
593                     dig_port->base.base.base.id,
594                     dig_port->base.base.name);
595
596         if (!edp_have_panel_power(intel_dp))
597                 wait_panel_power_cycle(intel_dp);
598
599         pp = ilk_get_pp_control(intel_dp);
600         pp |= EDP_FORCE_VDD;
601
602         pp_stat_reg = _pp_stat_reg(intel_dp);
603         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
604
605         intel_de_write(dev_priv, pp_ctrl_reg, pp);
606         intel_de_posting_read(dev_priv, pp_ctrl_reg);
607         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
608                     intel_de_read(dev_priv, pp_stat_reg),
609                     intel_de_read(dev_priv, pp_ctrl_reg));
610         /*
611          * If the panel wasn't on, delay before accessing aux channel
612          */
613         if (!edp_have_panel_power(intel_dp)) {
614                 drm_dbg_kms(&dev_priv->drm,
615                             "[ENCODER:%d:%s] panel power wasn't enabled\n",
616                             dig_port->base.base.base.id,
617                             dig_port->base.base.name);
618                 msleep(intel_dp->pps.panel_power_up_delay);
619         }
620
621         return need_to_disable;
622 }
623
624 /*
625  * Must be paired with intel_pps_off().
626  * Nested calls to these functions are not allowed since
627  * we drop the lock. Caller must use some higher level
628  * locking to prevent nested calls from other threads.
629  */
630 void intel_pps_vdd_on(struct intel_dp *intel_dp)
631 {
632         intel_wakeref_t wakeref;
633         bool vdd;
634
635         if (!intel_dp_is_edp(intel_dp))
636                 return;
637
638         vdd = false;
639         with_intel_pps_lock(intel_dp, wakeref)
640                 vdd = intel_pps_vdd_on_unlocked(intel_dp);
641         I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
642                         dp_to_dig_port(intel_dp)->base.base.base.id,
643                         dp_to_dig_port(intel_dp)->base.base.name);
644 }
645
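/*
 * Drop the VDD force bit immediately and release the AUX power domain
 * reference taken when VDD was enabled. Callers must have already
 * cleared want_panel_vdd.
 */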
646 static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
647 {
648         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
649         struct intel_digital_port *dig_port =
650                 dp_to_dig_port(intel_dp);
651         u32 pp;
652         i915_reg_t pp_stat_reg, pp_ctrl_reg;
653
654         lockdep_assert_held(&dev_priv->pps_mutex);
655
656         drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
657
658         if (!edp_have_panel_vdd(intel_dp))
659                 return;
660
661         drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
662                     dig_port->base.base.base.id,
663                     dig_port->base.base.name);
664
665         pp = ilk_get_pp_control(intel_dp);
666         pp &= ~EDP_FORCE_VDD;
667
668         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
669         pp_stat_reg = _pp_stat_reg(intel_dp);
670
671         intel_de_write(dev_priv, pp_ctrl_reg, pp);
672         intel_de_posting_read(dev_priv, pp_ctrl_reg);
673
674         /* Make sure sequencer is idle before allowing subsequent activity */
675         drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
676                     intel_de_read(dev_priv, pp_stat_reg),
677                     intel_de_read(dev_priv, pp_ctrl_reg));
678
679         if ((pp & PANEL_POWER_ON) == 0)
680                 intel_dp->pps.panel_power_off_time = ktime_get_boottime();
681
682         intel_display_power_put(dev_priv,
683                                 intel_aux_power_domain(dig_port),
684                                 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
685 }
686
687 void intel_pps_vdd_off_sync(struct intel_dp *intel_dp)
688 {
689         intel_wakeref_t wakeref;
690
691         if (!intel_dp_is_edp(intel_dp))
692                 return;
693
694         cancel_delayed_work_sync(&intel_dp->pps.panel_vdd_work);
695         /*
696          * vdd might still be enabled due to the delayed vdd off.
697          * Make sure vdd is actually turned off here.
698          */
699         with_intel_pps_lock(intel_dp, wakeref)
700                 intel_pps_vdd_off_sync_unlocked(intel_dp);
701 }
702
703 static void edp_panel_vdd_work(struct work_struct *__work)
704 {
705         struct intel_pps *pps = container_of(to_delayed_work(__work),
706                                              struct intel_pps, panel_vdd_work);
707         struct intel_dp *intel_dp = container_of(pps, struct intel_dp, pps);
708         intel_wakeref_t wakeref;
709
710         with_intel_pps_lock(intel_dp, wakeref) {
711                 if (!intel_dp->pps.want_panel_vdd)
712                         intel_pps_vdd_off_sync_unlocked(intel_dp);
713         }
714 }
715
716 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
717 {
718         unsigned long delay;
719
720         /*
721          * Queue the timer to fire a long time from now (relative to the power
722          * down delay) to keep the panel power up across a sequence of
723          * operations.
724          */
725         delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
726         schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
727 }
728
729 /*
730  * Must be paired with intel_pps_vdd_on_unlocked().
731  * Must hold pps_mutex around the whole on/off sequence.
732  * Can be nested with intel_pps_vdd_{on,off}() calls.
733  */
734 void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
735 {
736         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
737
738         lockdep_assert_held(&dev_priv->pps_mutex);
739
740         if (!intel_dp_is_edp(intel_dp))
741                 return;
742
743         I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
744                         dp_to_dig_port(intel_dp)->base.base.base.id,
745                         dp_to_dig_port(intel_dp)->base.base.name);
746
747         intel_dp->pps.want_panel_vdd = false;
748
749         if (sync)
750                 intel_pps_vdd_off_sync_unlocked(intel_dp);
751         else
752                 edp_panel_vdd_schedule_off(intel_dp);
753 }
754
755 void intel_pps_on_unlocked(struct intel_dp *intel_dp)
756 {
757         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
758         u32 pp;
759         i915_reg_t pp_ctrl_reg;
760
761         lockdep_assert_held(&dev_priv->pps_mutex);
762
763         if (!intel_dp_is_edp(intel_dp))
764                 return;
765
766         drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
767                     dp_to_dig_port(intel_dp)->base.base.base.id,
768                     dp_to_dig_port(intel_dp)->base.base.name);
769
770         if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
771                      "[ENCODER:%d:%s] panel power already on\n",
772                      dp_to_dig_port(intel_dp)->base.base.base.id,
773                      dp_to_dig_port(intel_dp)->base.base.name))
774                 return;
775
776         wait_panel_power_cycle(intel_dp);
777
778         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
779         pp = ilk_get_pp_control(intel_dp);
780         if (IS_GEN(dev_priv, 5)) {
781                 /* ILK workaround: disable reset around power sequence */
782                 pp &= ~PANEL_POWER_RESET;
783                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
784                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
785         }
786
787         pp |= PANEL_POWER_ON;
788         if (!IS_GEN(dev_priv, 5))
789                 pp |= PANEL_POWER_RESET;
790
791         intel_de_write(dev_priv, pp_ctrl_reg, pp);
792         intel_de_posting_read(dev_priv, pp_ctrl_reg);
793
794         wait_panel_on(intel_dp);
795         intel_dp->pps.last_power_on = jiffies;
796
797         if (IS_GEN(dev_priv, 5)) {
798                 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
799                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
800                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
801         }
802 }
803
804 void intel_pps_on(struct intel_dp *intel_dp)
805 {
806         intel_wakeref_t wakeref;
807
808         if (!intel_dp_is_edp(intel_dp))
809                 return;
810
811         with_intel_pps_lock(intel_dp, wakeref)
812                 intel_pps_on_unlocked(intel_dp);
813 }
814
815 void intel_pps_off_unlocked(struct intel_dp *intel_dp)
816 {
817         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
818         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
819         u32 pp;
820         i915_reg_t pp_ctrl_reg;
821
822         lockdep_assert_held(&dev_priv->pps_mutex);
823
824         if (!intel_dp_is_edp(intel_dp))
825                 return;
826
827         drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
828                     dig_port->base.base.base.id, dig_port->base.base.name);
829
830         drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
831                  "Need [ENCODER:%d:%s] VDD to turn off panel\n",
832                  dig_port->base.base.base.id, dig_port->base.base.name);
833
834         pp = ilk_get_pp_control(intel_dp);
835         /* We need to switch off panel power _and_ force vdd, for otherwise some
836          * panels get very unhappy and cease to work. */
837         pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
838                 EDP_BLC_ENABLE);
839
840         pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
841
842         intel_dp->pps.want_panel_vdd = false;
843
844         intel_de_write(dev_priv, pp_ctrl_reg, pp);
845         intel_de_posting_read(dev_priv, pp_ctrl_reg);
846
847         wait_panel_off(intel_dp);
848         intel_dp->pps.panel_power_off_time = ktime_get_boottime();
849
850         /* We got a reference when we enabled the VDD. */
851         intel_display_power_put(dev_priv,
852                                 intel_aux_power_domain(dig_port),
853                                 fetch_and_zero(&intel_dp->pps.vdd_wakeref));
854 }
855
856 void intel_pps_off(struct intel_dp *intel_dp)
857 {
858         intel_wakeref_t wakeref;
859
860         if (!intel_dp_is_edp(intel_dp))
861                 return;
862
863         with_intel_pps_lock(intel_dp, wakeref)
864                 intel_pps_off_unlocked(intel_dp);
865 }
866
867 /* Enable backlight in the panel power control. */
868 void intel_pps_backlight_on(struct intel_dp *intel_dp)
869 {
870         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
871         intel_wakeref_t wakeref;
872
873         /*
874          * If we enable the backlight right away following a panel power
875          * on, we may see slight flicker as the panel syncs with the eDP
876          * link.  So delay a bit to make sure the image is solid before
877          * allowing it to appear.
878          */
879         wait_backlight_on(intel_dp);
880
881         with_intel_pps_lock(intel_dp, wakeref) {
882                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
883                 u32 pp;
884
885                 pp = ilk_get_pp_control(intel_dp);
886                 pp |= EDP_BLC_ENABLE;
887
888                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
889                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
890         }
891 }
892
893 /* Disable backlight in the panel power control. */
894 void intel_pps_backlight_off(struct intel_dp *intel_dp)
895 {
896         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
897         intel_wakeref_t wakeref;
898
899         if (!intel_dp_is_edp(intel_dp))
900                 return;
901
902         with_intel_pps_lock(intel_dp, wakeref) {
903                 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
904                 u32 pp;
905
906                 pp = ilk_get_pp_control(intel_dp);
907                 pp &= ~EDP_BLC_ENABLE;
908
909                 intel_de_write(dev_priv, pp_ctrl_reg, pp);
910                 intel_de_posting_read(dev_priv, pp_ctrl_reg);
911         }
912
913         intel_dp->pps.last_backlight_off = jiffies;
914         edp_wait_backlight_off(intel_dp);
915 }
916
917 /*
918  * Hook for controlling the panel power control backlight through the bl_power
919  * sysfs attribute. Take care to handle multiple calls.
920  */
921 void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
922 {
923         struct drm_i915_private *i915 = to_i915(connector->base.dev);
924         struct intel_dp *intel_dp = intel_attached_dp(connector);
925         intel_wakeref_t wakeref;
926         bool is_enabled;
927
928         is_enabled = false;
929         with_intel_pps_lock(intel_dp, wakeref)
930                 is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
931         if (is_enabled == enable)
932                 return;
933
934         drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
935                     enable ? "enable" : "disable");
936
937         if (enable)
938                 intel_pps_backlight_on(intel_dp);
939         else
940                 intel_pps_backlight_off(intel_dp);
941 }
942
943 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
944 {
945         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
946         struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
947         enum pipe pipe = intel_dp->pps.pps_pipe;
948         i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
949
950         drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
951
952         if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
953                 return;
954
955         intel_pps_vdd_off_sync_unlocked(intel_dp);
956
957         /*
958          * VLV seems to get confused when multiple power sequencers
959          * have the same port selected (even if only one has power/vdd
960          * enabled). The failure manifests as vlv_wait_port_ready() failing.
961          * CHV, on the other hand, doesn't seem to mind having the same port
962          * selected in multiple power sequencers, but let's clear the
963          * port select always when logically disconnecting a power sequencer
964          * from a port.
965          */
966         drm_dbg_kms(&dev_priv->drm,
967                     "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
968                     pipe_name(pipe), dig_port->base.base.base.id,
969                     dig_port->base.base.name);
970         intel_de_write(dev_priv, pp_on_reg, 0);
971         intel_de_posting_read(dev_priv, pp_on_reg);
972
973         intel_dp->pps.pps_pipe = INVALID_PIPE;
974 }
975
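/*
 * Detach the given pipe's power sequencer from whichever DP/eDP port
 * currently has it assigned, making sure VDD is off first.
 */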
976 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
977                                       enum pipe pipe)
978 {
979         struct intel_encoder *encoder;
980
981         lockdep_assert_held(&dev_priv->pps_mutex);
982
983         for_each_intel_dp(&dev_priv->drm, encoder) {
984                 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
985
986                 drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
987                          "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
988                          pipe_name(pipe), encoder->base.base.id,
989                          encoder->base.name);
990
991                 if (intel_dp->pps.pps_pipe != pipe)
992                         continue;
993
994                 drm_dbg_kms(&dev_priv->drm,
995                             "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
996                             pipe_name(pipe), encoder->base.base.id,
997                             encoder->base.name);
998
999                 /* make sure vdd is off before we steal it */
1000                 vlv_detach_power_sequencer(intel_dp);
1001         }
1002 }
1003
1004 void vlv_pps_init(struct intel_encoder *encoder,
1005                   const struct intel_crtc_state *crtc_state)
1006 {
1007         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1008         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1009         struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1010
1011         lockdep_assert_held(&dev_priv->pps_mutex);
1012
1013         drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
1014
1015         if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
1016             intel_dp->pps.pps_pipe != crtc->pipe) {
1017                 /*
1018                  * If another power sequencer was being used on this
1019                  * port previously make sure to turn off vdd there while
1020                  * we still have control of it.
1021                  */
1022                 vlv_detach_power_sequencer(intel_dp);
1023         }
1024
1025         /*
1026          * We may be stealing the power
1027          * sequencer from another port.
1028          */
1029         vlv_steal_power_sequencer(dev_priv, crtc->pipe);
1030
1031         intel_dp->pps.active_pipe = crtc->pipe;
1032
1033         if (!intel_dp_is_edp(intel_dp))
1034                 return;
1035
1036         /* now it's all ours */
1037         intel_dp->pps.pps_pipe = crtc->pipe;
1038
1039         drm_dbg_kms(&dev_priv->drm,
1040                     "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
1041                     pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
1042                     encoder->base.name);
1043
1044         /* init power sequencer on this pipe and port */
1045         pps_init_delays(intel_dp);
1046         pps_init_registers(intel_dp, true);
1047 }
1048
1049 static void intel_pps_vdd_sanitize(struct intel_dp *intel_dp)
1050 {
1051         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1052         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1053
1054         lockdep_assert_held(&dev_priv->pps_mutex);
1055
1056         if (!edp_have_panel_vdd(intel_dp))
1057                 return;
1058
1059         /*
1060          * The VDD bit needs a power domain reference, so if the bit is
1061          * already enabled when we boot or resume, grab this reference and
1062          * schedule a vdd off, so we don't hold on to the reference
1063          * indefinitely.
1064          */
1065         drm_dbg_kms(&dev_priv->drm,
1066                     "VDD left on by BIOS, adjusting state tracking\n");
1067         drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
1068         intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
1069                                                             intel_aux_power_domain(dig_port));
1070
1071         edp_panel_vdd_schedule_off(intel_dp);
1072 }
1073
1074 bool intel_pps_have_power(struct intel_dp *intel_dp)
1075 {
1076         intel_wakeref_t wakeref;
1077         bool have_power = false;
1078
1079         with_intel_pps_lock(intel_dp, wakeref) {
1080                 have_power = edp_have_panel_power(intel_dp) &&
1081                                                   edp_have_panel_vdd(intel_dp);
1082         }
1083
1084         return have_power;
1085 }
1086
1087 static void pps_init_timestamps(struct intel_dp *intel_dp)
1088 {
1089         intel_dp->pps.panel_power_off_time = ktime_get_boottime();
1090         intel_dp->pps.last_power_on = jiffies;
1091         intel_dp->pps.last_backlight_off = jiffies;
1092 }
1093
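/*
 * Read the current panel power sequencer delays from the hardware into
 * *seq, in 100us units. t11_t12 comes from PP_DIVISOR, or from
 * PP_CONTROL on platforms without a separate divisor register.
 */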
1094 static void
1095 intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
1096 {
1097         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1098         u32 pp_on, pp_off, pp_ctl;
1099         struct pps_registers regs;
1100
1101         intel_pps_get_registers(intel_dp, &regs);
1102
1103         pp_ctl = ilk_get_pp_control(intel_dp);
1104
1105         /* Ensure PPS is unlocked */
1106         if (!HAS_DDI(dev_priv))
1107                 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1108
1109         pp_on = intel_de_read(dev_priv, regs.pp_on);
1110         pp_off = intel_de_read(dev_priv, regs.pp_off);
1111
1112         /* Pull timing values out of registers */
1113         seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
1114         seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
1115         seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
1116         seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
1117
1118         if (i915_mmio_reg_valid(regs.pp_div)) {
1119                 u32 pp_div;
1120
1121                 pp_div = intel_de_read(dev_priv, regs.pp_div);
1122
1123                 seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
1124         } else {
1125                 seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
1126         }
1127 }
1128
1129 static void
1130 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
1131 {
1132         DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
1133                       state_name,
1134                       seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
1135 }
1136
1137 static void
1138 intel_pps_verify_state(struct intel_dp *intel_dp)
1139 {
1140         struct edp_power_seq hw;
1141         struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
1142
1143         intel_pps_readout_hw_state(intel_dp, &hw);
1144
1145         if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
1146             hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
1147                 DRM_ERROR("PPS state mismatch\n");
1148                 intel_pps_dump_state("sw", sw);
1149                 intel_pps_dump_state("hw", &hw);
1150         }
1151 }
1152
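/*
 * Compute the final panel power sequencer delays as the max of the
 * current hardware values and the VBT, falling back to the eDP spec
 * limits when both are zero, and convert them to milliseconds.
 */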
1153 static void pps_init_delays(struct intel_dp *intel_dp)
1154 {
1155         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1156         struct edp_power_seq cur, vbt, spec,
1157                 *final = &intel_dp->pps.pps_delays;
1158
1159         lockdep_assert_held(&dev_priv->pps_mutex);
1160
1161         /* already initialized? */
1162         if (final->t11_t12 != 0)
1163                 return;
1164
1165         intel_pps_readout_hw_state(intel_dp, &cur);
1166
1167         intel_pps_dump_state("cur", &cur);
1168
1169         vbt = dev_priv->vbt.edp.pps;
1170         /* On the Toshiba Satellite P50-C-18C system the VBT T12 delay
1171          * of 500ms appears to be too short. Occasionally the panel
1172          * just fails to power back on. Increasing the delay to 800ms
1173          * seems sufficient to avoid this problem.
1174          */
1175         if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
1176                 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
1177                 drm_dbg_kms(&dev_priv->drm,
1178                             "Increasing T12 panel delay as per the quirk to %d\n",
1179                             vbt.t11_t12);
1180         }
1181         /* T11_T12 delay is special and actually in units of 100ms, but zero
1182          * based in the hw (so we need to add 100 ms). But the sw vbt
1183          * table multiplies it by 1000 to make it in units of 100usec,
1184          * too. */
1185         vbt.t11_t12 += 100 * 10;
1186
1187         /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
1188          * our hw here, which are all in 100usec. */
1189         spec.t1_t3 = 210 * 10;
1190         spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
1191         spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
1192         spec.t10 = 500 * 10;
1193         /* This one is special and actually in units of 100ms, but zero
1194          * based in the hw (so we need to add 100 ms). But the sw vbt
1195          * table multiplies it by 1000 to make it in units of 100usec,
1196          * too. */
1197         spec.t11_t12 = (510 + 100) * 10;
1198
1199         intel_pps_dump_state("vbt", &vbt);
1200
1201         /* Use the max of the register settings and vbt. If both are
1202          * unset, fall back to the spec limits. */
1203 #define assign_final(field)     final->field = (max(cur.field, vbt.field) == 0 ? \
1204                                        spec.field : \
1205                                        max(cur.field, vbt.field))
1206         assign_final(t1_t3);
1207         assign_final(t8);
1208         assign_final(t9);
1209         assign_final(t10);
1210         assign_final(t11_t12);
1211 #undef assign_final
1212
1213 #define get_delay(field)        (DIV_ROUND_UP(final->field, 10))
1214         intel_dp->pps.panel_power_up_delay = get_delay(t1_t3);
1215         intel_dp->pps.backlight_on_delay = get_delay(t8);
1216         intel_dp->pps.backlight_off_delay = get_delay(t9);
1217         intel_dp->pps.panel_power_down_delay = get_delay(t10);
1218         intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
1219 #undef get_delay
1220
1221         drm_dbg_kms(&dev_priv->drm,
1222                     "panel power up delay %d, power down delay %d, power cycle delay %d\n",
1223                     intel_dp->pps.panel_power_up_delay,
1224                     intel_dp->pps.panel_power_down_delay,
1225                     intel_dp->pps.panel_power_cycle_delay);
1226
1227         drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
1228                     intel_dp->pps.backlight_on_delay,
1229                     intel_dp->pps.backlight_off_delay);
1230
1231         /*
1232          * We override the HW backlight delays to 1 because we do manual waits
1233          * on them. For T8, even BSpec recommends doing it. For T9, if we
1234          * don't do this, we'll end up waiting for the backlight off delay
1235          * twice: once when we do the manual sleep, and once when we disable
1236          * the panel and wait for the PP_STATUS bit to become zero.
1237          */
1238         final->t8 = 1;
1239         final->t9 = 1;
1240
1241         /*
1242          * HW has only a 100msec granularity for t11_t12 so round it up
1243          * accordingly.
1244          */
1245         final->t11_t12 = roundup(final->t11_t12, 100 * 10);
1246 }
1247
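/*
 * Program the power sequencer delay and port select registers from the
 * software state, optionally forcing VDD off first to keep the power
 * domain tracking sane (see the comment below).
 */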
1248 static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
1249 {
1250         struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1251         u32 pp_on, pp_off, port_sel = 0;
1252         int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
1253         struct pps_registers regs;
1254         enum port port = dp_to_dig_port(intel_dp)->base.port;
1255         const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
1256
1257         lockdep_assert_held(&dev_priv->pps_mutex);
1258
1259         intel_pps_get_registers(intel_dp, &regs);
1260
1261         /*
1262          * On some VLV machines the BIOS can leave the VDD
1263          * enabled even on power sequencers which aren't
1264          * hooked up to any port. This would mess up the
1265          * power domain tracking the first time we pick
1266          * one of these power sequencers for use since
1267          * intel_pps_vdd_on_unlocked() would notice that the VDD was
1268          * already on and therefore wouldn't grab the power
1269          * domain reference. Disable VDD first to avoid this.
1270          * This also avoids spuriously turning the VDD on as
1271          * soon as the new power sequencer gets initialized.
1272          */
1273         if (force_disable_vdd) {
1274                 u32 pp = ilk_get_pp_control(intel_dp);
1275
1276                 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
1277                          "Panel power already on\n");
1278
1279                 if (pp & EDP_FORCE_VDD)
1280                         drm_dbg_kms(&dev_priv->drm,
1281                                     "VDD already on, disabling first\n");
1282
1283                 pp &= ~EDP_FORCE_VDD;
1284
1285                 intel_de_write(dev_priv, regs.pp_ctrl, pp);
1286         }
1287
1288         pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
1289                 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
1290         pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
1291                 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
1292
1293         /* Haswell doesn't have any port selection bits for the panel
1294          * power sequencer any more. */
1295         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1296                 port_sel = PANEL_PORT_SELECT_VLV(port);
1297         } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
1298                 switch (port) {
1299                 case PORT_A:
1300                         port_sel = PANEL_PORT_SELECT_DPA;
1301                         break;
1302                 case PORT_C:
1303                         port_sel = PANEL_PORT_SELECT_DPC;
1304                         break;
1305                 case PORT_D:
1306                         port_sel = PANEL_PORT_SELECT_DPD;
1307                         break;
1308                 default:
1309                         MISSING_CASE(port);
1310                         break;
1311                 }
1312         }
1313
1314         pp_on |= port_sel;
1315
1316         intel_de_write(dev_priv, regs.pp_on, pp_on);
1317         intel_de_write(dev_priv, regs.pp_off, pp_off);
1318
1319         /*
1320          * Compute the divisor for the pp clock; simply match the Bspec formula.
1321          */
1322         if (i915_mmio_reg_valid(regs.pp_div)) {
1323                 intel_de_write(dev_priv, regs.pp_div,
1324                                REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
1325         } else {
1326                 u32 pp_ctl;
1327
1328                 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
1329                 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
1330                 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
1331                 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
1332         }
1333
1334         drm_dbg_kms(&dev_priv->drm,
1335                     "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
1336                     intel_de_read(dev_priv, regs.pp_on),
1337                     intel_de_read(dev_priv, regs.pp_off),
1338                     i915_mmio_reg_valid(regs.pp_div) ?
1339                     intel_de_read(dev_priv, regs.pp_div) :
1340                     (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
1341 }
1342
1343 void intel_pps_encoder_reset(struct intel_dp *intel_dp)
1344 {
1345         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
1346         intel_wakeref_t wakeref;
1347
1348         if (!intel_dp_is_edp(intel_dp))
1349                 return;
1350
1351         with_intel_pps_lock(intel_dp, wakeref) {
1352                 /*
1353                  * Reinit the power sequencer also on the resume path, in case
1354                  * BIOS did something nasty with it.
1355                  */
1356                 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1357                         vlv_initial_power_sequencer_setup(intel_dp);
1358
1359                 pps_init_delays(intel_dp);
1360                 pps_init_registers(intel_dp, false);
1361
1362                 intel_pps_vdd_sanitize(intel_dp);
1363         }
1364 }
1365
1366 void intel_pps_init(struct intel_dp *intel_dp)
1367 {
1368         INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
1369
1370         pps_init_timestamps(intel_dp);
1371
1372         intel_pps_encoder_reset(intel_dp);
1373 }
1374
1375 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
1376 {
1377         int pps_num;
1378         int pps_idx;
1379
1380         if (HAS_DDI(dev_priv))
1381                 return;
1382         /*
1383          * This w/a is needed at least on CPT/PPT, but to be sure apply it
1384          * everywhere that registers can be write-protected.
1385          */
1386         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1387                 pps_num = 2;
1388         else
1389                 pps_num = 1;
1390
1391         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
1392                 u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
1393
1394                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
1395                 intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
1396         }
1397 }
1398
1399 void intel_pps_setup(struct drm_i915_private *i915)
1400 {
1401         if (HAS_PCH_SPLIT(i915) || IS_GEN9_LP(i915))
1402                 i915->pps_mmio_base = PCH_PPS_BASE;
1403         else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1404                 i915->pps_mmio_base = VLV_PPS_BASE;
1405         else
1406                 i915->pps_mmio_base = PPS_BASE;
1407 }