drivers/gpu/drm/i915/gt/intel_rps.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_rps.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"

#define BUSY_MAX_EI     20u /* ms */

/*
 * Lock protecting IPS related data structures
 */
static DEFINE_SPINLOCK(mchdev_lock);

static struct intel_gt *rps_to_gt(struct intel_rps *rps)
{
        return container_of(rps, struct intel_gt, rps);
}

static struct drm_i915_private *rps_to_i915(struct intel_rps *rps)
{
        return rps_to_gt(rps)->i915;
}

static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
{
        return rps_to_gt(rps)->uncore;
}

static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
{
        struct intel_gt *gt = rps_to_gt(rps);

        return &gt->uc.guc.slpc;
}

static bool rps_uses_slpc(struct intel_rps *rps)
{
        struct intel_gt *gt = rps_to_gt(rps);

        return intel_uc_uses_guc_slpc(&gt->uc);
}

static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
{
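        /* Drop the "must be zero" (mbz) bits before writing GEN6_PMINTRMSK */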
        return mask & ~rps->pm_intrmsk_mbz;
}

static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
{
        intel_uncore_write_fw(uncore, reg, val);
}

static void rps_timer(struct timer_list *t)
{
        struct intel_rps *rps = from_timer(rps, t, timer);
        struct intel_engine_cs *engine;
        ktime_t dt, last, timestamp;
        enum intel_engine_id id;
        s64 max_busy[3] = {};

        timestamp = 0;
        for_each_engine(engine, rps_to_gt(rps), id) {
                s64 busy;
                int i;

                dt = intel_engine_get_busy_time(engine, &timestamp);
                last = engine->stats.rps;
                engine->stats.rps = dt;

                busy = ktime_to_ns(ktime_sub(dt, last));
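                /*
                 * Insertion into a top-3 table: after this loop, max_busy[]
                 * holds the three largest per-engine busy deltas in
                 * descending order.
                 */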
                for (i = 0; i < ARRAY_SIZE(max_busy); i++) {
                        if (busy > max_busy[i])
                                swap(busy, max_busy[i]);
                }
        }
        last = rps->pm_timestamp;
        rps->pm_timestamp = timestamp;

        if (intel_rps_is_active(rps)) {
                s64 busy;
                int i;

                dt = ktime_sub(timestamp, last);

                /*
                 * Our goal is to evaluate each engine independently, so we run
                 * at the lowest clocks required to sustain the heaviest
                 * workload. However, a task may be split into sequential
                 * dependent operations across a set of engines, such that
                 * the independent contributions do not account for high load,
                 * but overall the task is GPU bound. For example, consider
                 * video decode on vcs followed by colour post-processing
                 * on vecs, followed by general post-processing on rcs.
                 * Since multiple engines being active does not necessarily
                 * imply a single continuous workload across all engines, we
                 * hedge our bets by only contributing a factor of the
                 * distributed load into our busyness calculation.
                 */
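                /* Weighted sum: busy = max + second/2 + third/4 */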
                busy = max_busy[0];
                for (i = 1; i < ARRAY_SIZE(max_busy); i++) {
                        if (!max_busy[i])
                                break;

                        busy += div_u64(max_busy[i], 1 << i);
                }
                GT_TRACE(rps_to_gt(rps),
                         "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
                         busy, (int)div64_u64(100 * busy, dt),
                         max_busy[0], max_busy[1], max_busy[2],
                         rps->pm_interval);

                if (100 * busy > rps->power.up_threshold * dt &&
                    rps->cur_freq < rps->max_freq_softlimit) {
                        rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
                        rps->pm_interval = 1;
                        schedule_work(&rps->work);
                } else if (100 * busy < rps->power.down_threshold * dt &&
                           rps->cur_freq > rps->min_freq_softlimit) {
                        rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
                        rps->pm_interval = 1;
                        schedule_work(&rps->work);
                } else {
                        rps->last_adj = 0;
                }

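                /*
                 * Back off exponentially while the workload is stable,
                 * up to BUSY_MAX_EI (20ms) between evaluations.
                 */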
                mod_timer(&rps->timer,
                          jiffies + msecs_to_jiffies(rps->pm_interval));
                rps->pm_interval = min(rps->pm_interval * 2, BUSY_MAX_EI);
        }
}

static void rps_start_timer(struct intel_rps *rps)
{
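        /*
         * pm_timestamp alternates between an absolute time (while running)
         * and an elapsed delta (while stopped); the symmetric ktime_sub()
         * here and in rps_stop_timer() converts between the two so parked
         * time is excluded from the next evaluation interval.
         */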
        rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
        rps->pm_interval = 1;
        mod_timer(&rps->timer, jiffies + 1);
}

static void rps_stop_timer(struct intel_rps *rps)
{
        del_timer_sync(&rps->timer);
        rps->pm_timestamp = ktime_sub(ktime_get(), rps->pm_timestamp);
        cancel_work_sync(&rps->work);
}

static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
{
        u32 mask = 0;

        /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
        if (val > rps->min_freq_softlimit)
                mask |= (GEN6_PM_RP_UP_EI_EXPIRED |
                         GEN6_PM_RP_DOWN_THRESHOLD |
                         GEN6_PM_RP_DOWN_TIMEOUT);

        if (val < rps->max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

        mask &= rps->pm_events;

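        /*
         * GEN6_PMINTRMSK is a mask of disabled interrupts, so invert the
         * set of wanted events before handing it back.
         */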
        return rps_pm_sanitize_mask(rps, ~mask);
}

static void rps_reset_ei(struct intel_rps *rps)
{
        memset(&rps->ei, 0, sizeof(rps->ei));
}

static void rps_enable_interrupts(struct intel_rps *rps)
{
        struct intel_gt *gt = rps_to_gt(rps);

        GEM_BUG_ON(rps_uses_slpc(rps));

        GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
                 rps->pm_events, rps_pm_mask(rps, rps->last_freq));

        rps_reset_ei(rps);

        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_enable_irq(gt, rps->pm_events);
        spin_unlock_irq(&gt->irq_lock);

        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq));
}

static void gen6_rps_reset_interrupts(struct intel_rps *rps)
{
        gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS);
}

static void gen11_rps_reset_interrupts(struct intel_rps *rps)
{
        while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM))
                ;
}

static void rps_reset_interrupts(struct intel_rps *rps)
{
        struct intel_gt *gt = rps_to_gt(rps);

        spin_lock_irq(&gt->irq_lock);
        if (GRAPHICS_VER(gt->i915) >= 11)
                gen11_rps_reset_interrupts(rps);
        else
                gen6_rps_reset_interrupts(rps);

        rps->pm_iir = 0;
        spin_unlock_irq(&gt->irq_lock);
}

static void rps_disable_interrupts(struct intel_rps *rps)
{
        struct intel_gt *gt = rps_to_gt(rps);

        intel_uncore_write(gt->uncore,
                           GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));

        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&gt->irq_lock);

        intel_synchronize_irq(gt->i915);

        /*
         * Now that we will not be generating any more work, flush any
         * outstanding tasks. As we are called on the RPS idle path,
         * we will reset the GPU to minimum frequencies, so the current
         * state of the worker can be discarded.
         */
        cancel_work_sync(&rps->work);

        rps_reset_interrupts(rps);
        GT_TRACE(gt, "interrupts:off\n");
}

static const struct cparams {
        u16 i;
        u16 t;
        u16 m;
        u16 c;
} cparams[] = {
        { 1, 1333, 301, 28664 },
        { 1, 1066, 294, 24460 },
        { 1, 800, 294, 25192 },
        { 0, 1333, 276, 27605 },
        { 0, 1066, 276, 27605 },
        { 0, 800, 231, 23784 },
};

static void gen5_rps_init(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        u8 fmax, fmin, fstart;
        u32 rgvmodectl;
        int c_m, i;

        if (i915->fsb_freq <= 3200)
                c_m = 0;
        else if (i915->fsb_freq <= 4800)
                c_m = 1;
        else
                c_m = 2;

        for (i = 0; i < ARRAY_SIZE(cparams); i++) {
                if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) {
                        rps->ips.m = cparams[i].m;
                        rps->ips.c = cparams[i].c;
                        break;
                }
        }

        rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

        /* Set up min, max, and cur for interrupt handling */
        fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
        fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;
        drm_dbg(&i915->drm, "fmax: %d, fmin: %d, fstart: %d\n",
                fmax, fmin, fstart);

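        /*
         * Note the deliberate swap: ILK frequency bins are inverted
         * (see gen5_invert_freq()), so the hardware FMAX bin maps to the
         * driver's minimum frequency and vice versa.
         */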
        rps->min_freq = fmax;
        rps->efficient_freq = fstart;
        rps->max_freq = fmin;
}

static unsigned long
__ips_chipset_val(struct intel_ips *ips)
{
        struct intel_uncore *uncore =
                rps_to_uncore(container_of(ips, struct intel_rps, ips));
        unsigned long now = jiffies_to_msecs(jiffies), dt;
        unsigned long result;
        u64 total, delta;

        lockdep_assert_held(&mchdev_lock);

        /*
         * Prevent division-by-zero if we are asking too fast.
         * Also, we don't get interesting results if we are polling
         * faster than once in 10ms, so just return the saved value
         * in such cases.
         */
        dt = now - ips->last_time1;
        if (dt <= 10)
                return ips->chipset_power;

        /* FIXME: handle per-counter overflow */
        total = intel_uncore_read(uncore, DMIEC);
        total += intel_uncore_read(uncore, DDREC);
        total += intel_uncore_read(uncore, CSIEC);

        delta = total - ips->last_count1;

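        /* chipset power = (m * delta / dt + c) / 10, using the cparams fit */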
        result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10);

        ips->last_count1 = total;
        ips->last_time1 = now;

        ips->chipset_power = result;

        return result;
}

static unsigned long ips_mch_val(struct intel_uncore *uncore)
{
        unsigned int m, x, b;
        u32 tsfs;

        tsfs = intel_uncore_read(uncore, TSFS);
        x = intel_uncore_read8(uncore, TR1);

        b = tsfs & TSFS_INTR_MASK;
        m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT;

        return m * x / 127 - b;
}

static int _pxvid_to_vd(u8 pxvid)
{
        if (pxvid == 0)
                return 0;

        if (pxvid >= 8 && pxvid < 31)
                pxvid = 31;

        return (pxvid + 2) * 125;
}

static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid)
{
        const int vd = _pxvid_to_vd(pxvid);

        if (INTEL_INFO(i915)->is_mobile)
                return max(vd - 1125, 0);

        return vd;
}

static void __gen5_ips_update(struct intel_ips *ips)
{
        struct intel_uncore *uncore =
                rps_to_uncore(container_of(ips, struct intel_rps, ips));
        u64 now, delta, dt;
        u32 count;

        lockdep_assert_held(&mchdev_lock);

        now = ktime_get_raw_ns();
        dt = now - ips->last_time2;
        do_div(dt, NSEC_PER_MSEC);

        /* Don't divide by 0 */
        if (dt <= 10)
                return;

        count = intel_uncore_read(uncore, GFXEC);
        delta = count - ips->last_count2;

        ips->last_count2 = count;
        ips->last_time2 = now;

        /* More magic constants... */
        ips->gfx_power = div_u64(delta * 1181, dt * 10);
}

static void gen5_rps_update(struct intel_rps *rps)
{
        spin_lock_irq(&mchdev_lock);
        __gen5_ips_update(&rps->ips);
        spin_unlock_irq(&mchdev_lock);
}

static unsigned int gen5_invert_freq(struct intel_rps *rps,
                                     unsigned int val)
{
        /* Invert the frequency bin into an ips delay */
        val = rps->max_freq - val;
        val = rps->min_freq + val;

        return val;
}

static int __gen5_rps_set(struct intel_rps *rps, u8 val)
{
        struct intel_uncore *uncore = rps_to_uncore(rps);
        u16 rgvswctl;

        lockdep_assert_held(&mchdev_lock);

        rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
        if (rgvswctl & MEMCTL_CMD_STS) {
                DRM_DEBUG("gpu busy, RCS change rejected\n");
                return -EBUSY; /* still busy with another command */
        }

        /* Invert the frequency bin into an ips delay */
        val = gen5_invert_freq(rps, val);

        rgvswctl =
                (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
                (val << MEMCTL_FREQ_SHIFT) |
                MEMCTL_SFCAVM;
        intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);
        intel_uncore_posting_read16(uncore, MEMSWCTL);

        rgvswctl |= MEMCTL_CMD_STS;
        intel_uncore_write16(uncore, MEMSWCTL, rgvswctl);

        return 0;
}

static int gen5_rps_set(struct intel_rps *rps, u8 val)
{
        int err;

        spin_lock_irq(&mchdev_lock);
        err = __gen5_rps_set(rps, val);
        spin_unlock_irq(&mchdev_lock);

        return err;
}

static unsigned long intel_pxfreq(u32 vidfreq)
{
        int div = (vidfreq & 0x3f0000) >> 16;
        int post = (vidfreq & 0x3000) >> 12;
        int pre = (vidfreq & 0x7);

        if (!pre)
                return 0;

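        /* Presumably kHz, scaled from a 133.33 MHz reference clock */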
        return div * 133333 / (pre << post);
}

static unsigned int init_emon(struct intel_uncore *uncore)
{
        u8 pxw[16];
        int i;

        /* Disable to program */
        intel_uncore_write(uncore, ECR, 0);
        intel_uncore_posting_read(uncore, ECR);

        /* Program energy weights for various events */
        intel_uncore_write(uncore, SDEW, 0x15040d00);
        intel_uncore_write(uncore, CSIEW0, 0x007f0000);
        intel_uncore_write(uncore, CSIEW1, 0x1e220004);
        intel_uncore_write(uncore, CSIEW2, 0x04000004);

        for (i = 0; i < 5; i++)
                intel_uncore_write(uncore, PEW(i), 0);
        for (i = 0; i < 3; i++)
                intel_uncore_write(uncore, DEW(i), 0);

        /* Program P-state weights to account for frequency power adjustment */
        for (i = 0; i < 16; i++) {
                u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i));
                unsigned int freq = intel_pxfreq(pxvidfreq);
                unsigned int vid =
                        (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;
                unsigned int val;

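                /*
                 * Power weight scales as V^2 * f, normalised against the
                 * 127-step VID scale and a (presumed) 900 MHz reference,
                 * then packed into a byte.
                 */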
                val = vid * vid * freq / 1000 * 255;
                val /= 127 * 127 * 900;

                pxw[i] = val;
        }
        /* Render standby states get 0 weight */
        pxw[14] = 0;
        pxw[15] = 0;

        for (i = 0; i < 4; i++) {
                intel_uncore_write(uncore, PXW(i),
                                   pxw[i * 4 + 0] << 24 |
                                   pxw[i * 4 + 1] << 16 |
                                   pxw[i * 4 + 2] <<  8 |
                                   pxw[i * 4 + 3] <<  0);
        }

        /* Adjust magic regs to magic values (more experimental results) */
        intel_uncore_write(uncore, OGW0, 0);
        intel_uncore_write(uncore, OGW1, 0);
        intel_uncore_write(uncore, EG0, 0x00007f00);
        intel_uncore_write(uncore, EG1, 0x0000000e);
        intel_uncore_write(uncore, EG2, 0x000e0000);
        intel_uncore_write(uncore, EG3, 0x68000300);
        intel_uncore_write(uncore, EG4, 0x42000000);
        intel_uncore_write(uncore, EG5, 0x00140031);
        intel_uncore_write(uncore, EG6, 0);
        intel_uncore_write(uncore, EG7, 0);

        for (i = 0; i < 8; i++)
                intel_uncore_write(uncore, PXWL(i), 0);

        /* Enable PMON + select events */
        intel_uncore_write(uncore, ECR, 0x80000019);

        return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK;
}

static bool gen5_rps_enable(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        u8 fstart, vstart;
        u32 rgvmodectl;

        spin_lock_irq(&mchdev_lock);

        rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);

        /* Enable temp reporting */
        intel_uncore_write16(uncore, PMMISC,
                             intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN);
        intel_uncore_write16(uncore, TSC1,
                             intel_uncore_read16(uncore, TSC1) | TSE);

        /* 100ms RC evaluation intervals */
        intel_uncore_write(uncore, RCUPEI, 100000);
        intel_uncore_write(uncore, RCDNEI, 100000);

        /* Set max/min thresholds to 90ms and 80ms respectively */
        intel_uncore_write(uncore, RCBMAXAVG, 90000);
        intel_uncore_write(uncore, RCBMINAVG, 80000);

        intel_uncore_write(uncore, MEMIHYST, 1);

        /* Set up min, max, and cur for interrupt handling */
        fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                MEMMODE_FSTART_SHIFT;

        vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) &
                  PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT;

        intel_uncore_write(uncore,
                           MEMINTREN,
                           MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

        intel_uncore_write(uncore, VIDSTART, vstart);
        intel_uncore_posting_read(uncore, VIDSTART);

        rgvmodectl |= MEMMODE_SWMODE_EN;
        intel_uncore_write(uncore, MEMMODECTL, rgvmodectl);

        if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) &
                             MEMCTL_CMD_STS) == 0, 10))
                drm_err(&uncore->i915->drm,
                        "stuck trying to change perf mode\n");
        mdelay(1);

        __gen5_rps_set(rps, rps->cur_freq);

        rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC);
        rps->ips.last_count1 += intel_uncore_read(uncore, DDREC);
        rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC);
        rps->ips.last_time1 = jiffies_to_msecs(jiffies);

        rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
        rps->ips.last_time2 = ktime_get_raw_ns();

        spin_lock(&i915->irq_lock);
        ilk_enable_display_irq(i915, DE_PCU_EVENT);
        spin_unlock(&i915->irq_lock);

        spin_unlock_irq(&mchdev_lock);

        rps->ips.corr = init_emon(uncore);

        return true;
}

static void gen5_rps_disable(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        u16 rgvswctl;

        spin_lock_irq(&mchdev_lock);

        spin_lock(&i915->irq_lock);
        ilk_disable_display_irq(i915, DE_PCU_EVENT);
        spin_unlock(&i915->irq_lock);

        rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);

        /* Ack interrupts, disable EFC interrupt */
        intel_uncore_write(uncore, MEMINTREN,
                           intel_uncore_read(uncore, MEMINTREN) &
                           ~MEMINT_EVAL_CHG_EN);
        intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);

        /* Go back to the starting frequency */
        __gen5_rps_set(rps, rps->idle_freq);
        mdelay(1);
        rgvswctl |= MEMCTL_CMD_STS;
        intel_uncore_write(uncore, MEMSWCTL, rgvswctl);
        mdelay(1);

        spin_unlock_irq(&mchdev_lock);
}

static u32 rps_limits(struct intel_rps *rps, u8 val)
{
        u32 limits;

        /*
         * Only set the down limit when we've reached the lowest level to avoid
         * getting more interrupts, otherwise leave this clear. This prevents a
         * race in the hw when coming out of rc6: There's a tiny window where
         * the hw runs at the minimal clock before selecting the desired
         * frequency, if the down threshold expires in that window we will not
         * receive a down interrupt.
         */
        if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
                limits = rps->max_freq_softlimit << 23;
                if (val <= rps->min_freq_softlimit)
                        limits |= rps->min_freq_softlimit << 14;
        } else {
                limits = rps->max_freq_softlimit << 24;
                if (val <= rps->min_freq_softlimit)
                        limits |= rps->min_freq_softlimit << 16;
        }

        return limits;
}

static void rps_set_power(struct intel_rps *rps, int new_power)
{
        struct intel_gt *gt = rps_to_gt(rps);
        struct intel_uncore *uncore = gt->uncore;
        u32 threshold_up = 0, threshold_down = 0; /* in % */
        u32 ei_up = 0, ei_down = 0;

        lockdep_assert_held(&rps->power.mutex);

        if (new_power == rps->power.mode)
                return;

        threshold_up = 95;
        threshold_down = 85;

        /* Note the units here are not exactly 1us, but 1280ns. */
        switch (new_power) {
        case LOW_POWER:
                ei_up = 16000;
                ei_down = 32000;
                break;

        case BETWEEN:
                ei_up = 13000;
                ei_down = 32000;
                break;

        case HIGH_POWER:
                ei_up = 10000;
                ei_down = 32000;
                break;
        }

        /*
         * Once byt can survive dynamic sw freq adjustments without hanging
         * the system, this restriction can be lifted.
         */
        if (IS_VALLEYVIEW(gt->i915))
                goto skip_hw_write;

        GT_TRACE(gt,
                 "changing power mode [%d], up %d%% @ %dus, down %d%% @ %dus\n",
                 new_power, threshold_up, ei_up, threshold_down, ei_down);

        set(uncore, GEN6_RP_UP_EI,
            intel_gt_ns_to_pm_interval(gt, ei_up * 1000));
        set(uncore, GEN6_RP_UP_THRESHOLD,
            intel_gt_ns_to_pm_interval(gt, ei_up * threshold_up * 10));

        set(uncore, GEN6_RP_DOWN_EI,
            intel_gt_ns_to_pm_interval(gt, ei_down * 1000));
        set(uncore, GEN6_RP_DOWN_THRESHOLD,
            intel_gt_ns_to_pm_interval(gt, ei_down * threshold_down * 10));

        set(uncore, GEN6_RP_CONTROL,
            (GRAPHICS_VER(gt->i915) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
            GEN6_RP_MEDIA_HW_NORMAL_MODE |
            GEN6_RP_MEDIA_IS_GFX |
            GEN6_RP_ENABLE |
            GEN6_RP_UP_BUSY_AVG |
            GEN6_RP_DOWN_IDLE_AVG);

skip_hw_write:
        rps->power.mode = new_power;
        rps->power.up_threshold = threshold_up;
        rps->power.down_threshold = threshold_down;
}

static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val)
{
        int new_power;

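        /*
         * Hysteresis: only switch power mode when the requested frequency
         * both crosses a band boundary and moves in the same direction
         * relative to the current frequency, avoiding mode flip-flop.
         */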
        new_power = rps->power.mode;
        switch (rps->power.mode) {
        case LOW_POWER:
                if (val > rps->efficient_freq + 1 &&
                    val > rps->cur_freq)
                        new_power = BETWEEN;
                break;

        case BETWEEN:
                if (val <= rps->efficient_freq &&
                    val < rps->cur_freq)
                        new_power = LOW_POWER;
                else if (val >= rps->rp0_freq &&
                         val > rps->cur_freq)
                        new_power = HIGH_POWER;
                break;

        case HIGH_POWER:
                if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
                    val < rps->cur_freq)
                        new_power = BETWEEN;
                break;
        }
        /* Max/min bins are special */
        if (val <= rps->min_freq_softlimit)
                new_power = LOW_POWER;
        if (val >= rps->max_freq_softlimit)
                new_power = HIGH_POWER;

        mutex_lock(&rps->power.mutex);
        if (rps->power.interactive)
                new_power = HIGH_POWER;
        rps_set_power(rps, new_power);
        mutex_unlock(&rps->power.mutex);
}

void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
        GT_TRACE(rps_to_gt(rps), "mark interactive: %s\n", yesno(interactive));

        mutex_lock(&rps->power.mutex);
        if (interactive) {
                if (!rps->power.interactive++ && intel_rps_is_active(rps))
                        rps_set_power(rps, HIGH_POWER);
        } else {
                GEM_BUG_ON(!rps->power.interactive);
                rps->power.interactive--;
        }
        mutex_unlock(&rps->power.mutex);
}

static int gen6_rps_set(struct intel_rps *rps, u8 val)
{
        struct intel_uncore *uncore = rps_to_uncore(rps);
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 swreq;

        GEM_BUG_ON(rps_uses_slpc(rps));

        if (GRAPHICS_VER(i915) >= 9)
                swreq = GEN9_FREQUENCY(val);
        else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
                swreq = HSW_FREQUENCY(val);
        else
                swreq = (GEN6_FREQUENCY(val) |
                         GEN6_OFFSET(0) |
                         GEN6_AGGRESSIVE_TURBO);
        set(uncore, GEN6_RPNSWREQ, swreq);

        GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d, swreq:%x\n",
                 val, intel_gpu_freq(rps, val), swreq);

        return 0;
}

static int vlv_rps_set(struct intel_rps *rps, u8 val)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        int err;

        vlv_punit_get(i915);
        err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val);
        vlv_punit_put(i915);

        GT_TRACE(rps_to_gt(rps), "set val:%x, freq:%d\n",
                 val, intel_gpu_freq(rps, val));

        return err;
}

static int rps_set(struct intel_rps *rps, u8 val, bool update)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        int err;

        if (val == rps->last_freq)
                return 0;

        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                err = vlv_rps_set(rps, val);
        else if (GRAPHICS_VER(i915) >= 6)
                err = gen6_rps_set(rps, val);
        else
                err = gen5_rps_set(rps, val);
        if (err)
                return err;

        if (update && GRAPHICS_VER(i915) >= 6)
                gen6_rps_set_thresholds(rps, val);
        rps->last_freq = val;

        return 0;
}

void intel_rps_unpark(struct intel_rps *rps)
{
        if (!intel_rps_is_enabled(rps))
                return;

        GT_TRACE(rps_to_gt(rps), "unpark:%x\n", rps->cur_freq);

        /*
         * Use the user's desired frequency as a guide, but for better
         * performance, jump directly to RPe as our starting frequency.
         */
        mutex_lock(&rps->lock);

        intel_rps_set_active(rps);
        intel_rps_set(rps,
                      clamp(rps->cur_freq,
                            rps->min_freq_softlimit,
                            rps->max_freq_softlimit));

        mutex_unlock(&rps->lock);

        rps->pm_iir = 0;
        if (intel_rps_has_interrupts(rps))
                rps_enable_interrupts(rps);
        if (intel_rps_uses_timer(rps))
                rps_start_timer(rps);

        if (GRAPHICS_VER(rps_to_i915(rps)) == 5)
                gen5_rps_update(rps);
}

void intel_rps_park(struct intel_rps *rps)
{
        int adj;

        if (!intel_rps_is_enabled(rps))
                return;

        GEM_BUG_ON(atomic_read(&rps->num_waiters));

        if (!intel_rps_clear_active(rps))
                return;

        if (intel_rps_uses_timer(rps))
                rps_stop_timer(rps);
        if (intel_rps_has_interrupts(rps))
                rps_disable_interrupts(rps);

        if (rps->last_freq <= rps->idle_freq)
                return;

        /*
         * The punit delays the write of the frequency and voltage until it
         * determines the GPU is awake. During normal usage we don't want to
         * waste power changing the frequency if the GPU is sleeping (rc6).
         * However, the GPU and driver are now idle and we do not want to delay
         * switching to minimum voltage (reducing power whilst idle) as we do
         * not expect to be woken in the near future and so must flush the
         * change by waking the device.
         *
         * We choose to take the media powerwell (either would do to trick the
         * punit into committing the voltage change) as that takes a lot less
         * power than the render powerwell.
         */
        intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA);
        rps_set(rps, rps->idle_freq, false);
        intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA);

        /*
         * Since we will try and restart from the previously requested
         * frequency on unparking, treat this idle point as a downclock
         * interrupt and reduce the frequency for resume. If we park/unpark
         * more frequently than the rps worker can run, we will not respond
         * to any EI and never see a change in frequency.
         *
         * (Note we accommodate Cherryview's limitation of only using an
         * even bin by applying it to all.)
         */
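        /* e.g. successive parks yield adj = -2, -4, -8, ... (clamped below) */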
        adj = rps->last_adj;
        if (adj < 0)
                adj *= 2;
        else /* CHV needs even encode values */
                adj = -2;
        rps->last_adj = adj;
        rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq);
        if (rps->cur_freq < rps->efficient_freq) {
                rps->cur_freq = rps->efficient_freq;
                rps->last_adj = 0;
        }

        GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq);
}

void intel_rps_boost(struct i915_request *rq)
{
        if (i915_request_signaled(rq) || i915_request_has_waitboost(rq))
                return;

        /* Serializes with i915_request_retire() */
        if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) {
                struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;

                if (atomic_fetch_inc(&rps->num_waiters))
                        return;

                if (!intel_rps_is_active(rps))
                        return;

                GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
                         rq->fence.context, rq->fence.seqno);

                if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
                        schedule_work(&rps->work);

                WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
        }
}

int intel_rps_set(struct intel_rps *rps, u8 val)
{
        int err;

        lockdep_assert_held(&rps->lock);
        GEM_BUG_ON(val > rps->max_freq);
        GEM_BUG_ON(val < rps->min_freq);

        if (intel_rps_is_active(rps)) {
                err = rps_set(rps, val, true);
                if (err)
                        return err;

                /*
                 * Make sure we continue to get interrupts
                 * until we hit the minimum or maximum frequencies.
                 */
                if (intel_rps_has_interrupts(rps)) {
                        struct intel_uncore *uncore = rps_to_uncore(rps);

                        set(uncore,
                            GEN6_RP_INTERRUPT_LIMITS, rps_limits(rps, val));

                        set(uncore, GEN6_PMINTRMSK, rps_pm_mask(rps, val));
                }
        }

        rps->cur_freq = val;
        return 0;
}

static void gen6_rps_init(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);

        /* All of these values are in units of 50MHz */

        /* static values from HW: RP0 > RP1 > RPn (min_freq) */
        if (IS_GEN9_LP(i915)) {
                u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);

                rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
                rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
                rps->min_freq = (rp_state_cap >>  0) & 0xff;
        } else {
                u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);

                rps->rp0_freq = (rp_state_cap >>  0) & 0xff;
                rps->rp1_freq = (rp_state_cap >>  8) & 0xff;
                rps->min_freq = (rp_state_cap >> 16) & 0xff;
        }

        /* hw_max = RP0 until we check for overclocking */
        rps->max_freq = rps->rp0_freq;

        rps->efficient_freq = rps->rp1_freq;
        if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
            IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
                u32 ddcc_status = 0;

                if (sandybridge_pcode_read(i915,
                                           HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                           &ddcc_status, NULL) == 0)
                        rps->efficient_freq =
                                clamp_t(u8,
                                        (ddcc_status >> 8) & 0xff,
                                        rps->min_freq,
                                        rps->max_freq);
        }

        if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
                /* Store the frequency values in 16.66 MHz units, which is
                 * the natural hardware unit for SKL
                 */
                rps->rp0_freq *= GEN9_FREQ_SCALER;
                rps->rp1_freq *= GEN9_FREQ_SCALER;
                rps->min_freq *= GEN9_FREQ_SCALER;
                rps->max_freq *= GEN9_FREQ_SCALER;
                rps->efficient_freq *= GEN9_FREQ_SCALER;
        }
}

static bool rps_reset(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);

        /* force a reset */
        rps->power.mode = -1;
        rps->last_freq = -1;

        if (rps_set(rps, rps->min_freq, true)) {
                drm_err(&i915->drm, "Failed to reset RPS to initial values\n");
                return false;
        }

        rps->cur_freq = rps->min_freq;
        return true;
}

/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static bool gen9_rps_enable(struct intel_rps *rps)
{
        struct intel_gt *gt = rps_to_gt(rps);
        struct intel_uncore *uncore = gt->uncore;

        /* Program defaults and thresholds for RPS */
        if (GRAPHICS_VER(gt->i915) == 9)
                intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                                      GEN9_FREQUENCY(rps->rp1_freq));

        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa);

        rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

        return rps_reset(rps);
}

static bool gen8_rps_enable(struct intel_rps *rps)
{
        struct intel_uncore *uncore = rps_to_uncore(rps);

        intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ,
                              HSW_FREQUENCY(rps->rp1_freq));

        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

        rps->pm_events = GEN6_PM_RP_UP_THRESHOLD | GEN6_PM_RP_DOWN_THRESHOLD;

        return rps_reset(rps);
}

static bool gen6_rps_enable(struct intel_rps *rps)
{
        struct intel_uncore *uncore = rps_to_uncore(rps);

        /* Power down if completely idle for over 50ms */
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000);
        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

        rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
                          GEN6_PM_RP_DOWN_THRESHOLD |
                          GEN6_PM_RP_DOWN_TIMEOUT);

        return rps_reset(rps);
}

static int chv_rps_max_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_gt *gt = rps_to_gt(rps);
        u32 val;

        val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

        switch (gt->info.sseu.eu_total) {
        case 8:
                /* (2 * 4) config */
                val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT;
                break;
        case 12:
                /* (2 * 6) config */
                val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT;
                break;
        case 16:
                /* (2 * 8) config */
        default:
                /* Setting (2 * 8) Min RP0 for any other combination */
                val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT;
                break;
        }

        return val & FB_GFX_FREQ_FUSE_MASK;
}

static int chv_rps_rpe_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val;

        val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG);
        val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT;

        return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
}

static int chv_rps_guar_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val;

        val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE);

        return val & FB_GFX_FREQ_FUSE_MASK;
}

static u32 chv_rps_min_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val;

        val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE);
        val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT;

        return val & FB_GFX_FREQ_FUSE_MASK;
}

static bool chv_rps_enable(struct intel_rps *rps)
{
        struct intel_uncore *uncore = rps_to_uncore(rps);
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val;

        /* 1: Program defaults and thresholds for RPS */
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
        intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
        intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

        /* 2: Enable RPS */
        intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
                              GEN6_RP_MEDIA_HW_NORMAL_MODE |
                              GEN6_RP_MEDIA_IS_GFX |
                              GEN6_RP_ENABLE |
                              GEN6_RP_UP_BUSY_AVG |
                              GEN6_RP_DOWN_IDLE_AVG);

        rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
                          GEN6_PM_RP_DOWN_THRESHOLD |
                          GEN6_PM_RP_DOWN_TIMEOUT);

        /* Setting Fixed Bias */
        vlv_punit_get(i915);

        val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50;
        vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

        val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

        vlv_punit_put(i915);

        /* RPS code assumes GPLL is used */
        drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
                      "GPLL not enabled\n");

        drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
        drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

        return rps_reset(rps);
}

static int vlv_rps_guar_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val, rp1;

        val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

        rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK;
        rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

        return rp1;
}

static int vlv_rps_max_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val, rp0;

        val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE);

        rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
        /* Clamp to max */
        rp0 = min_t(u32, rp0, 0xea);

        return rp0;
}

static int vlv_rps_rpe_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val, rpe;

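        /* RPe is fused across two registers: 5 low bits plus the rest << 5 */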
        val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
        rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
        val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
        rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

        return rpe;
}

static int vlv_rps_min_freq(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val;

        val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff;
        /*
         * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
         * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
         * a BYT-M B0 the above register contains 0xbf. Moreover when setting
         * a frequency Punit will not allow values below 0xc0. Clamp it to 0xc0
         * to make sure it matches what Punit accepts.
         */
        return max_t(u32, val, 0xc0);
}

static bool vlv_rps_enable(struct intel_rps *rps)
{
        struct intel_uncore *uncore = rps_to_uncore(rps);
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 val;

        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000);
        intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400);
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000);
        intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000);
        intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000);

        intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10);

        intel_uncore_write_fw(uncore, GEN6_RP_CONTROL,
                              GEN6_RP_MEDIA_TURBO |
                              GEN6_RP_MEDIA_HW_NORMAL_MODE |
                              GEN6_RP_MEDIA_IS_GFX |
                              GEN6_RP_ENABLE |
                              GEN6_RP_UP_BUSY_AVG |
                              GEN6_RP_DOWN_IDLE_CONT);

        /* WaGsvRC0ResidencyMethod:vlv */
        rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;

        vlv_punit_get(i915);

        /* Setting Fixed Bias */
        val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875;
        vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val);

        val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);

        vlv_punit_put(i915);

        /* RPS code assumes GPLL is used */
        drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
                      "GPLL not enabled\n");

        drm_dbg(&i915->drm, "GPLL enabled? %s\n", yesno(val & GPLLENABLE));
        drm_dbg(&i915->drm, "GPU status: 0x%08x\n", val);

        return rps_reset(rps);
}

static unsigned long __ips_gfx_val(struct intel_ips *ips)
{
        struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        unsigned int t, state1, state2;
        u32 pxvid, ext_v;
        u64 corr, corr2;

        lockdep_assert_held(&mchdev_lock);

        pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid);

        state1 = ext_v;

        /* Revel in the empirically derived constants */

        /* Correction factor in 1/100000 units */
        t = ips_mch_val(uncore);
        if (t > 80)
                corr = t * 2349 + 135940;
        else if (t >= 50)
                corr = t * 964 + 29317;
        else /* < 50 */
                corr = t * 301 + 1004;

        corr = div_u64(corr * 150142 * state1, 10000) - 78642;
        corr2 = div_u64(corr, 100000) * ips->corr;

        state2 = div_u64(corr2 * state1, 10000);
        state2 /= 100; /* convert to mW */

        __gen5_ips_update(ips);

        return ips->gfx_power + state2;
}

static bool has_busy_stats(struct intel_rps *rps)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, rps_to_gt(rps), id) {
                if (!intel_engine_supports_stats(engine))
                        return false;
        }

        return true;
}

void intel_rps_enable(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        bool enabled = false;

        if (!HAS_RPS(i915))
                return;

        if (rps_uses_slpc(rps))
                return;

        intel_gt_check_clock_frequency(rps_to_gt(rps));

        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
        if (rps->max_freq <= rps->min_freq)
                /* leave disabled, no room for dynamic reclocking */;
        else if (IS_CHERRYVIEW(i915))
                enabled = chv_rps_enable(rps);
        else if (IS_VALLEYVIEW(i915))
                enabled = vlv_rps_enable(rps);
        else if (GRAPHICS_VER(i915) >= 9)
                enabled = gen9_rps_enable(rps);
        else if (GRAPHICS_VER(i915) >= 8)
                enabled = gen8_rps_enable(rps);
        else if (GRAPHICS_VER(i915) >= 6)
                enabled = gen6_rps_enable(rps);
        else if (IS_IRONLAKE_M(i915))
                enabled = gen5_rps_enable(rps);
        else
                MISSING_CASE(GRAPHICS_VER(i915));
        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
        if (!enabled)
                return;

        GT_TRACE(rps_to_gt(rps),
                 "min:%x, max:%x, freq:[%d, %d]\n",
                 rps->min_freq, rps->max_freq,
                 intel_gpu_freq(rps, rps->min_freq),
                 intel_gpu_freq(rps, rps->max_freq));

        GEM_BUG_ON(rps->max_freq < rps->min_freq);
        GEM_BUG_ON(rps->idle_freq > rps->max_freq);

        GEM_BUG_ON(rps->efficient_freq < rps->min_freq);
        GEM_BUG_ON(rps->efficient_freq > rps->max_freq);

        if (has_busy_stats(rps))
                intel_rps_set_timer(rps);
        else if (GRAPHICS_VER(i915) >= 6)
                intel_rps_set_interrupts(rps);
        else
                /* Ironlake currently uses intel_ips.ko */ {}

        intel_rps_set_enabled(rps);
}

static void gen6_rps_disable(struct intel_rps *rps)
{
        set(rps_to_uncore(rps), GEN6_RP_CONTROL, 0);
}

void intel_rps_disable(struct intel_rps *rps)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);

        intel_rps_clear_enabled(rps);
        intel_rps_clear_interrupts(rps);
        intel_rps_clear_timer(rps);

        if (GRAPHICS_VER(i915) >= 6)
                gen6_rps_disable(rps);
        else if (IS_IRONLAKE_M(i915))
                gen5_rps_disable(rps);
}

static int byt_gpu_freq(struct intel_rps *rps, int val)
{
        /*
         * N = val - 0xb7
         * Slow = Fast = GPLL ref * N
         */
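        /* e.g. val == 0xc0 gives N == 9, i.e. 9 * gpll_ref_freq / 1000 MHz */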
        return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct intel_rps *rps, int val)
{
        return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
}

static int chv_gpu_freq(struct intel_rps *rps, int val)
{
        /*
         * N = val / 2
         * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
         */
        return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct intel_rps *rps, int val)
{
        /* CHV needs even values */
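        /* Round to the nearest even opcode: compute N/2 first, then double */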
        return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
}

int intel_gpu_freq(struct intel_rps *rps, int val)
{
        struct drm_i915_private *i915 = rps_to_i915(rps);

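        /* Gen9+: 50 MHz / GEN9_FREQ_SCALER per unit, i.e. the 16.66 MHz
         * units noted in gen6_rps_init()
         */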
1480         if (GRAPHICS_VER(i915) >= 9)
1481                 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
1482                                          GEN9_FREQ_SCALER);
1483         else if (IS_CHERRYVIEW(i915))
1484                 return chv_gpu_freq(rps, val);
1485         else if (IS_VALLEYVIEW(i915))
1486                 return byt_gpu_freq(rps, val);
1487         else if (GRAPHICS_VER(i915) >= 6)
1488                 return val * GT_FREQUENCY_MULTIPLIER;
1489         else
1490                 return val;
1491 }
1492
1493 int intel_freq_opcode(struct intel_rps *rps, int val)
1494 {
1495         struct drm_i915_private *i915 = rps_to_i915(rps);
1496
1497         if (GRAPHICS_VER(i915) >= 9)
1498                 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
1499                                          GT_FREQUENCY_MULTIPLIER);
1500         else if (IS_CHERRYVIEW(i915))
1501                 return chv_freq_opcode(rps, val);
1502         else if (IS_VALLEYVIEW(i915))
1503                 return byt_freq_opcode(rps, val);
1504         else if (GRAPHICS_VER(i915) >= 6)
1505                 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
1506         else
1507                 return val;
1508 }
1509
1510 static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
1511 {
1512         struct drm_i915_private *i915 = rps_to_i915(rps);
1513
1514         rps->gpll_ref_freq =
1515                 vlv_get_cck_clock(i915, "GPLL ref",
1516                                   CCK_GPLL_CLOCK_CONTROL,
1517                                   i915->czclk_freq);
1518
1519         drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
1520                 rps->gpll_ref_freq);
1521 }
1522
1523 static void vlv_rps_init(struct intel_rps *rps)
1524 {
1525         struct drm_i915_private *i915 = rps_to_i915(rps);
1526         u32 val;
1527
1528         vlv_iosf_sb_get(i915,
1529                         BIT(VLV_IOSF_SB_PUNIT) |
1530                         BIT(VLV_IOSF_SB_NC) |
1531                         BIT(VLV_IOSF_SB_CCK));
1532
1533         vlv_init_gpll_ref_freq(rps);
1534
1535         val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
1536         switch ((val >> 6) & 3) {
1537         case 0:
1538         case 1:
1539                 i915->mem_freq = 800;
1540                 break;
1541         case 2:
1542                 i915->mem_freq = 1066;
1543                 break;
1544         case 3:
1545                 i915->mem_freq = 1333;
1546                 break;
1547         }
1548         drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
1549
1550         rps->max_freq = vlv_rps_max_freq(rps);
1551         rps->rp0_freq = rps->max_freq;
1552         drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
1553                 intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
1554
1555         rps->efficient_freq = vlv_rps_rpe_freq(rps);
1556         drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
1557                 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
1558
1559         rps->rp1_freq = vlv_rps_guar_freq(rps);
1560         drm_dbg(&i915->drm, "RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
1561                 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
1562
1563         rps->min_freq = vlv_rps_min_freq(rps);
1564         drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
1565                 intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
1566
1567         vlv_iosf_sb_put(i915,
1568                         BIT(VLV_IOSF_SB_PUNIT) |
1569                         BIT(VLV_IOSF_SB_NC) |
1570                         BIT(VLV_IOSF_SB_CCK));
1571 }
1572
1573 static void chv_rps_init(struct intel_rps *rps)
1574 {
1575         struct drm_i915_private *i915 = rps_to_i915(rps);
1576         u32 val;
1577
1578         vlv_iosf_sb_get(i915,
1579                         BIT(VLV_IOSF_SB_PUNIT) |
1580                         BIT(VLV_IOSF_SB_NC) |
1581                         BIT(VLV_IOSF_SB_CCK));
1582
1583         vlv_init_gpll_ref_freq(rps);
1584
1585         val = vlv_cck_read(i915, CCK_FUSE_REG);
1586
1587         switch ((val >> 2) & 0x7) {
1588         case 3:
1589                 i915->mem_freq = 2000;
1590                 break;
1591         default:
1592                 i915->mem_freq = 1600;
1593                 break;
1594         }
1595         drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq);
1596
1597         rps->max_freq = chv_rps_max_freq(rps);
1598         rps->rp0_freq = rps->max_freq;
1599         drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
1600                 intel_gpu_freq(rps, rps->max_freq), rps->max_freq);
1601
1602         rps->efficient_freq = chv_rps_rpe_freq(rps);
1603         drm_dbg(&i915->drm, "RPe GPU freq: %d MHz (%u)\n",
1604                 intel_gpu_freq(rps, rps->efficient_freq), rps->efficient_freq);
1605
1606         rps->rp1_freq = chv_rps_guar_freq(rps);
1607         drm_dbg(&i915->drm, "RP1(Guar) GPU freq: %d MHz (%u)\n",
1608                 intel_gpu_freq(rps, rps->rp1_freq), rps->rp1_freq);
1609
1610         rps->min_freq = chv_rps_min_freq(rps);
1611         drm_dbg(&i915->drm, "min GPU freq: %d MHz (%u)\n",
1612                 intel_gpu_freq(rps, rps->min_freq), rps->min_freq);
1613
1614         vlv_iosf_sb_put(i915,
1615                         BIT(VLV_IOSF_SB_PUNIT) |
1616                         BIT(VLV_IOSF_SB_NC) |
1617                         BIT(VLV_IOSF_SB_CCK));
1618
1619         drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
1620                                    rps->rp1_freq | rps->min_freq) & 1,
1621                       "Odd GPU freq values\n");
1622 }
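/*
 * The parity warning above pairs with the "CHV needs even encode values"
 * steps in rps_work(): CHV punit opcodes are expected to advance in
 * steps of 2, so an odd RP0/RPe/RP1/RPn value here would suggest a
 * misread fuse rather than a real operating point.
 */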
1623
1624 static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
1625 {
1626         ei->ktime = ktime_get_raw();
1627         ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT);
1628         ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
1629 }
1630
1631 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
1632 {
1633         struct intel_uncore *uncore = rps_to_uncore(rps);
1634         const struct intel_rps_ei *prev = &rps->ei;
1635         struct intel_rps_ei now;
1636         u32 events = 0;
1637
1638         if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
1639                 return 0;
1640
1641         vlv_c0_read(uncore, &now);
1642
1643         if (prev->ktime) {
1644                 u64 time, c0;
1645                 u32 render, media;
1646
1647                 time = ktime_us_delta(now.ktime, prev->ktime);
1648
1649                 time *= rps_to_i915(rps)->czclk_freq;
1650
1651                 /*
1652                  * Workload can be split between render + media, e.g. SwapBuffers
1653                  * being blitted in X after being rendered in mesa. To account for
1654                  * this we need to combine both engines into our activity counter.
1655                  */
1656                 render = now.render_c0 - prev->render_c0;
1657                 media = now.media_c0 - prev->media_c0;
1658                 c0 = max(render, media);
1659                 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
1660
1661                 if (c0 > time * rps->power.up_threshold)
1662                         events = GEN6_PM_RP_UP_THRESHOLD;
1663                 else if (c0 < time * rps->power.down_threshold)
1664                         events = GEN6_PM_RP_DOWN_THRESHOLD;
1665         }
1666
1667         rps->ei = now;
1668         return events;
1669 }
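/*
 * In effect the workaround above converts the EI-expired interrupt into
 * synthetic up/down-threshold events: max(render, media) C0 residency is
 * expressed as a fixed-point percentage of the elapsed wall time in
 * czclk cycles and compared against power.up_threshold and
 * power.down_threshold (both percentages), approximating the RP
 * threshold events the hardware would otherwise raise itself.
 */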
1670
1671 static void rps_work(struct work_struct *work)
1672 {
1673         struct intel_rps *rps = container_of(work, typeof(*rps), work);
1674         struct intel_gt *gt = rps_to_gt(rps);
1675         struct drm_i915_private *i915 = rps_to_i915(rps);
1676         bool client_boost = false;
1677         int new_freq, adj, min, max;
1678         u32 pm_iir = 0;
1679
1680         spin_lock_irq(&gt->irq_lock);
1681         pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events;
1682         client_boost = atomic_read(&rps->num_waiters);
1683         spin_unlock_irq(&gt->irq_lock);
1684
1685         /* Make sure we didn't queue anything we're not going to process. */
1686         if (!pm_iir && !client_boost)
1687                 goto out;
1688
1689         mutex_lock(&rps->lock);
1690         if (!intel_rps_is_active(rps)) {
1691                 mutex_unlock(&rps->lock);
1692                 return;
1693         }
1694
1695         pm_iir |= vlv_wa_c0_ei(rps, pm_iir);
1696
1697         adj = rps->last_adj;
1698         new_freq = rps->cur_freq;
1699         min = rps->min_freq_softlimit;
1700         max = rps->max_freq_softlimit;
1701         if (client_boost)
1702                 max = rps->max_freq;
1703
1704         GT_TRACE(gt,
1705                  "pm_iir:%x, client_boost:%s, last:%d, cur:%x, min:%x, max:%x\n",
1706                  pm_iir, yesno(client_boost),
1707                  adj, new_freq, min, max);
1708
1709         if (client_boost && new_freq < rps->boost_freq) {
1710                 new_freq = rps->boost_freq;
1711                 adj = 0;
1712         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1713                 if (adj > 0)
1714                         adj *= 2;
1715                 else /* CHV needs even encode values */
1716                         adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1;
1717
1718                 if (new_freq >= rps->max_freq_softlimit)
1719                         adj = 0;
1720         } else if (client_boost) {
1721                 adj = 0;
1722         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1723                 if (rps->cur_freq > rps->efficient_freq)
1724                         new_freq = rps->efficient_freq;
1725                 else if (rps->cur_freq > rps->min_freq_softlimit)
1726                         new_freq = rps->min_freq_softlimit;
1727                 adj = 0;
1728         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1729                 if (adj < 0)
1730                         adj *= 2;
1731                 else /* CHV needs even encode values */
1732                         adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1;
1733
1734                 if (new_freq <= rps->min_freq_softlimit)
1735                         adj = 0;
1736         } else { /* unknown event */
1737                 adj = 0;
1738         }
1739
1740         /*
1741          * sysfs frequency limits may have snuck in while
1742          * servicing the interrupt
1743          */
1744         new_freq += adj;
1745         new_freq = clamp_t(int, new_freq, min, max);
1746
1747         if (intel_rps_set(rps, new_freq)) {
1748                 drm_dbg(&i915->drm, "Failed to set new GPU frequency\n");
1749                 adj = 0;
1750         }
1751         rps->last_adj = adj;
1752
1753         mutex_unlock(&rps->lock);
1754
1755 out:
1756         spin_lock_irq(&gt->irq_lock);
1757         gen6_gt_pm_unmask_irq(gt, rps->pm_events);
1758         spin_unlock_irq(&gt->irq_lock);
1759 }
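/*
 * Example of the ramp above under a sustained load, starting from
 * last_adj == 0: consecutive UP_THRESHOLD events step the request by
 * +1, +2, +4, ... (on CHV +2, +4, +8, ... to keep opcodes even) until
 * max_freq_softlimit is reached, while a single DOWN_TIMEOUT event
 * drops the request straight back to efficient_freq and resets the
 * ramp to 0.
 */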
1760
1761 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
1762 {
1763         struct intel_gt *gt = rps_to_gt(rps);
1764         const u32 events = rps->pm_events & pm_iir;
1765
1766         lockdep_assert_held(&gt->irq_lock);
1767
1768         if (unlikely(!events))
1769                 return;
1770
1771         GT_TRACE(gt, "irq events:%x\n", events);
1772
1773         gen6_gt_pm_mask_irq(gt, events);
1774
1775         rps->pm_iir |= events;
1776         schedule_work(&rps->work);
1777 }
1778
1779 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
1780 {
1781         struct intel_gt *gt = rps_to_gt(rps);
1782         u32 events;
1783
1784         events = pm_iir & rps->pm_events;
1785         if (events) {
1786                 spin_lock(&gt->irq_lock);
1787
1788                 GT_TRACE(gt, "irq events:%x\n", events);
1789
1790                 gen6_gt_pm_mask_irq(gt, events);
1791                 rps->pm_iir |= events;
1792
1793                 schedule_work(&rps->work);
1794                 spin_unlock(&gt->irq_lock);
1795         }
1796
1797         if (GRAPHICS_VER(gt->i915) >= 8)
1798                 return;
1799
1800         if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1801                 intel_engine_cs_irq(gt->engine[VECS0], pm_iir >> 10);
1802
1803         if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1804                 drm_dbg(&gt->i915->drm, "Command parser error, pm_iir 0x%08x\n", pm_iir);
1805 }
1806
1807 void gen5_rps_irq_handler(struct intel_rps *rps)
1808 {
1809         struct intel_uncore *uncore = rps_to_uncore(rps);
1810         u32 busy_up, busy_down, max_avg, min_avg;
1811         u8 new_freq;
1812
1813         spin_lock(&mchdev_lock);
1814
1815         intel_uncore_write16(uncore,
1816                              MEMINTRSTS,
1817                              intel_uncore_read(uncore, MEMINTRSTS));
1818
1819         intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
1820         busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
1821         busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
1822         max_avg = intel_uncore_read(uncore, RCBMAXAVG);
1823         min_avg = intel_uncore_read(uncore, RCBMINAVG);
1824
1825         /* Handle RCS change request from hw */
1826         new_freq = rps->cur_freq;
1827         if (busy_up > max_avg)
1828                 new_freq++;
1829         else if (busy_down < min_avg)
1830                 new_freq--;
1831         new_freq = clamp(new_freq,
1832                          rps->min_freq_softlimit,
1833                          rps->max_freq_softlimit);
1834
1835         if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq))
1836                 rps->cur_freq = new_freq;
1837
1838         spin_unlock(&mchdev_lock);
1839 }
1840
1841 void intel_rps_init_early(struct intel_rps *rps)
1842 {
1843         mutex_init(&rps->lock);
1844         mutex_init(&rps->power.mutex);
1845
1846         INIT_WORK(&rps->work, rps_work);
1847         timer_setup(&rps->timer, rps_timer, 0);
1848
1849         atomic_set(&rps->num_waiters, 0);
1850 }
1851
1852 void intel_rps_init(struct intel_rps *rps)
1853 {
1854         struct drm_i915_private *i915 = rps_to_i915(rps);
1855
1856         if (rps_uses_slpc(rps))
1857                 return;
1858
1859         if (IS_CHERRYVIEW(i915))
1860                 chv_rps_init(rps);
1861         else if (IS_VALLEYVIEW(i915))
1862                 vlv_rps_init(rps);
1863         else if (GRAPHICS_VER(i915) >= 6)
1864                 gen6_rps_init(rps);
1865         else if (IS_IRONLAKE_M(i915))
1866                 gen5_rps_init(rps);
1867
1868         /* Derive initial user preferences/limits from the hardware limits */
1869         rps->max_freq_softlimit = rps->max_freq;
1870         rps->min_freq_softlimit = rps->min_freq;
1871
1872         /* After setting max-softlimit, find the overclock max freq */
1873         if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) {
1874                 u32 params = 0;
1875
1876                 sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
1877                                        &params, NULL);
1878                 if (params & BIT(31)) { /* OC supported */
1879                         drm_dbg(&i915->drm,
1880                                 "Overclocking supported, max: %dMHz, overclock: %dMHz\n",
1881                                 (rps->max_freq & 0xff) * 50,
1882                                 (params & 0xff) * 50);
1883                         rps->max_freq = params & 0xff;
1884                 }
1885         }
1886
1887         /* Finally allow us to boost to max by default */
1888         rps->boost_freq = rps->max_freq;
1889         rps->idle_freq = rps->min_freq;
1890
1891         /* Start in the middle, from here we will autotune based on workload */
1892         rps->cur_freq = rps->efficient_freq;
1893
1894         rps->pm_intrmsk_mbz = 0;
1895
1896         /*
1897          * SNB,IVB,HSW can (and VLV,CHV may) hard hang on a looping
1898          * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
1899          *
1900          * TODO: verify if this can be reproduced on VLV,CHV.
1901          */
1902         if (GRAPHICS_VER(i915) <= 7)
1903                 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
1904
1905         if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
1906                 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1907
1908         /* GuC needs ARAT expired interrupt unmasked */
1909         if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
1910                 rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
1911 }
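/*
 * Illustration of the overclocking probe in intel_rps_init() (values
 * hypothetical): bit 31 of the pcode reply signals OC support and the
 * low byte is the maximum opcode in 50 MHz units, so params ==
 * 0x80000024 would lift rps->max_freq to opcode 0x24, i.e. 36 * 50 =
 * 1800 MHz. Note that max_freq_softlimit was latched beforehand, so
 * the overclocked range stays opt-in for userspace.
 */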
1912
1913 void intel_rps_sanitize(struct intel_rps *rps)
1914 {
1915         if (rps_uses_slpc(rps))
1916                 return;
1917
1918         if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
1919                 rps_disable_interrupts(rps);
1920 }
1921
1922 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat)
1923 {
1924         struct drm_i915_private *i915 = rps_to_i915(rps);
1925         u32 cagf;
1926
1927         if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1928                 cagf = (rpstat >> 8) & 0xff;
1929         else if (GRAPHICS_VER(i915) >= 9)
1930                 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1931         else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1932                 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1933         else if (GRAPHICS_VER(i915) >= 6)
1934                 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1935         else
1936                 cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
1937                                         MEMSTAT_PSTATE_SHIFT);
1938
1939         return cagf;
1940 }
1941
1942 static u32 read_cagf(struct intel_rps *rps)
1943 {
1944         struct drm_i915_private *i915 = rps_to_i915(rps);
1945         struct intel_uncore *uncore = rps_to_uncore(rps);
1946         u32 freq;
1947
1948         if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
1949                 vlv_punit_get(i915);
1950                 freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
1951                 vlv_punit_put(i915);
1952         } else if (GRAPHICS_VER(i915) >= 6) {
1953                 freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
1954         } else {
1955                 freq = intel_uncore_read(uncore, MEMSTAT_ILK);
1956         }
1957
1958         return intel_rps_get_cagf(rps, freq);
1959 }
1960
1961 u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
1962 {
1963         struct intel_runtime_pm *rpm = rps_to_uncore(rps)->rpm;
1964         intel_wakeref_t wakeref;
1965         u32 freq = 0;
1966
1967         with_intel_runtime_pm_if_in_use(rpm, wakeref)
1968                 freq = intel_gpu_freq(rps, read_cagf(rps));
1969
1970         return freq;
1971 }
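/*
 * Read-path sketch: intel_rps_read_actual_frequency() touches the
 * hardware only if it is already awake (the _if_in_use variant),
 * read_cagf() selects the platform register (PUNIT_REG_GPU_FREQ_STS,
 * GEN6_RPSTAT1 or MEMSTAT_ILK), intel_rps_get_cagf() extracts the
 * current-frequency field and intel_gpu_freq() converts it to MHz,
 * e.g. a Gen9 CAGF field of 18 reads back as 300 MHz. A parked GPU
 * reports 0 rather than waking up.
 */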
1972
1973 u32 intel_rps_read_punit_req(struct intel_rps *rps)
1974 {
1975         struct intel_uncore *uncore = rps_to_uncore(rps);
1976
1977         return intel_uncore_read(uncore, GEN6_RPNSWREQ);
1978 }
1979
1980 static u32 intel_rps_get_req(u32 pureq)
1981 {
1982         u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;
1983
1984         return req;
1985 }
1986
1987 u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
1988 {
1989         u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));
1990
1991         return intel_gpu_freq(rps, freq);
1992 }
1993
1994 u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
1995 {
1996         if (rps_uses_slpc(rps))
1997                 return intel_rps_read_punit_req_frequency(rps);
1998         else
1999                 return intel_gpu_freq(rps, rps->cur_freq);
2000 }
2001
2002 u32 intel_rps_get_max_frequency(struct intel_rps *rps)
2003 {
2004         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2005
2006         if (rps_uses_slpc(rps))
2007                 return slpc->max_freq_softlimit;
2008         else
2009                 return intel_gpu_freq(rps, rps->max_freq_softlimit);
2010 }
2011
2012 u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
2013 {
2014         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2015
2016         if (rps_uses_slpc(rps))
2017                 return slpc->rp0_freq;
2018         else
2019                 return intel_gpu_freq(rps, rps->rp0_freq);
2020 }
2021
2022 u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
2023 {
2024         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2025
2026         if (rps_uses_slpc(rps))
2027                 return slpc->rp1_freq;
2028         else
2029                 return intel_gpu_freq(rps, rps->rp1_freq);
2030 }
2031
2032 u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
2033 {
2034         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2035
2036         if (rps_uses_slpc(rps))
2037                 return slpc->min_freq;
2038         else
2039                 return intel_gpu_freq(rps, rps->min_freq);
2040 }
2041
2042 static int set_max_freq(struct intel_rps *rps, u32 val)
2043 {
2044         struct drm_i915_private *i915 = rps_to_i915(rps);
2045         int ret = 0;
2046
2047         mutex_lock(&rps->lock);
2048
2049         val = intel_freq_opcode(rps, val);
2050         if (val < rps->min_freq ||
2051             val > rps->max_freq ||
2052             val < rps->min_freq_softlimit) {
2053                 ret = -EINVAL;
2054                 goto unlock;
2055         }
2056
2057         if (val > rps->rp0_freq)
2058                 drm_dbg(&i915->drm, "User requested overclocking to %d\n",
2059                         intel_gpu_freq(rps, val));
2060
2061         rps->max_freq_softlimit = val;
2062
2063         val = clamp_t(int, rps->cur_freq,
2064                       rps->min_freq_softlimit,
2065                       rps->max_freq_softlimit);
2066
2067         /*
2068          * We still need *_set_rps to process the new max_delay and
2069          * update the interrupt limits and PMINTRMSK even though
2070          * frequency request may be unchanged.
2071          */
2072         intel_rps_set(rps, val);
2073
2074 unlock:
2075         mutex_unlock(&rps->lock);
2076
2077         return ret;
2078 }
2079
2080 int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
2081 {
2082         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2083
2084         if (rps_uses_slpc(rps))
2085                 return intel_guc_slpc_set_max_freq(slpc, val);
2086         else
2087                 return set_max_freq(rps, val);
2088 }
2089
2090 u32 intel_rps_get_min_frequency(struct intel_rps *rps)
2091 {
2092         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2093
2094         if (rps_uses_slpc(rps))
2095                 return slpc->min_freq_softlimit;
2096         else
2097                 return intel_gpu_freq(rps, rps->min_freq_softlimit);
2098 }
2099
2100 static int set_min_freq(struct intel_rps *rps, u32 val)
2101 {
2102         int ret = 0;
2103
2104         mutex_lock(&rps->lock);
2105
2106         val = intel_freq_opcode(rps, val);
2107         if (val < rps->min_freq ||
2108             val > rps->max_freq ||
2109             val > rps->max_freq_softlimit) {
2110                 ret = -EINVAL;
2111                 goto unlock;
2112         }
2113
2114         rps->min_freq_softlimit = val;
2115
2116         val = clamp_t(int, rps->cur_freq,
2117                       rps->min_freq_softlimit,
2118                       rps->max_freq_softlimit);
2119
2120         /*
2121          * We still need *_set_rps to process the new min_delay and
2122          * update the interrupt limits and PMINTRMSK even though
2123          * frequency request may be unchanged.
2124          */
2125         intel_rps_set(rps, val);
2126
2127 unlock:
2128         mutex_unlock(&rps->lock);
2129
2130         return ret;
2131 }
2132
2133 int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
2134 {
2135         struct intel_guc_slpc *slpc = rps_to_slpc(rps);
2136
2137         if (rps_uses_slpc(rps))
2138                 return intel_guc_slpc_set_min_freq(slpc, val);
2139         else
2140                 return set_min_freq(rps, val);
2141 }
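/*
 * Both setters take values in MHz; they are what the gt_max_freq_mhz /
 * gt_min_freq_mhz sysfs controls are expected to funnel into. A rough
 * usage sketch (hypothetical values, not driver code):
 *
 *	err = intel_rps_set_max_frequency(rps, 1100);
 *	if (err)	// -EINVAL if 1100 MHz is outside the hw range
 *		return err;	// or below the current min softlimit
 *
 * On success cur_freq is re-clamped so a live request never strays
 * outside the new softlimits.
 */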
2142
2143 /* External interface for intel_ips.ko */
2144
2145 static struct drm_i915_private __rcu *ips_mchdev;
2146
2147 /**
2148  * ips_ping_for_i915_load - tell the intel_ips driver that i915 is
2149  * now loaded, if IPS got loaded first.
2150  *
2151  * This awkward dance is so that neither module has to depend on the
2152  * other in order for IPS to do the appropriate communication of
2153  * GPU turbo limits to i915.
2154  */
2155 static void
2156 ips_ping_for_i915_load(void)
2157 {
2158         void (*link)(void);
2159
2160         link = symbol_get(ips_link_to_i915_driver);
2161         if (link) {
2162                 link();
2163                 symbol_put(ips_link_to_i915_driver);
2164         }
2165 }
2166
2167 void intel_rps_driver_register(struct intel_rps *rps)
2168 {
2169         struct intel_gt *gt = rps_to_gt(rps);
2170
2171         /*
2172          * We only register the i915 ips part with intel-ips once everything is
2173          * set up, to avoid intel-ips sneaking in and reading bogus values.
2174          */
2175         if (GRAPHICS_VER(gt->i915) == 5) {
2176                 GEM_BUG_ON(ips_mchdev);
2177                 rcu_assign_pointer(ips_mchdev, gt->i915);
2178                 ips_ping_for_i915_load();
2179         }
2180 }
2181
2182 void intel_rps_driver_unregister(struct intel_rps *rps)
2183 {
2184         if (rcu_access_pointer(ips_mchdev) == rps_to_i915(rps))
2185                 rcu_assign_pointer(ips_mchdev, NULL);
2186 }
2187
2188 static struct drm_i915_private *mchdev_get(void)
2189 {
2190         struct drm_i915_private *i915;
2191
2192         rcu_read_lock();
2193         i915 = rcu_dereference(ips_mchdev);
2194         if (i915 && !kref_get_unless_zero(&i915->drm.ref))
2195                 i915 = NULL;
2196         rcu_read_unlock();
2197
2198         return i915;
2199 }
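/*
 * mchdev_get() is the read side of the rcu_assign_pointer() dance in
 * intel_rps_driver_register()/unregister(): kref_get_unless_zero()
 * means a racing driver unload may legitimately return NULL here, so
 * every i915_gpu_*() export below bails out on NULL and balances a
 * successful lookup with drm_dev_put().
 */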
2200
2201 /**
2202  * i915_read_mch_val - return value for IPS use
2203  *
2204  * Calculate and return a value for the IPS driver to use when deciding whether
2205  * we have thermal and power headroom to increase CPU or GPU power budget.
2206  */
2207 unsigned long i915_read_mch_val(void)
2208 {
2209         struct drm_i915_private *i915;
2210         unsigned long chipset_val = 0;
2211         unsigned long graphics_val = 0;
2212         intel_wakeref_t wakeref;
2213
2214         i915 = mchdev_get();
2215         if (!i915)
2216                 return 0;
2217
2218         with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2219                 struct intel_ips *ips = &i915->gt.rps.ips;
2220
2221                 spin_lock_irq(&mchdev_lock);
2222                 chipset_val = __ips_chipset_val(ips);
2223                 graphics_val = __ips_gfx_val(ips);
2224                 spin_unlock_irq(&mchdev_lock);
2225         }
2226
2227         drm_dev_put(&i915->drm);
2228         return chipset_val + graphics_val;
2229 }
2230 EXPORT_SYMBOL_GPL(i915_read_mch_val);
2231
2232 /**
2233  * i915_gpu_raise - raise GPU frequency limit
2234  *
2235  * Raise the limit; IPS indicates we have thermal headroom.
2236  */
2237 bool i915_gpu_raise(void)
2238 {
2239         struct drm_i915_private *i915;
2240         struct intel_rps *rps;
2241
2242         i915 = mchdev_get();
2243         if (!i915)
2244                 return false;
2245
2246         rps = &i915->gt.rps;
2247
2248         spin_lock_irq(&mchdev_lock);
2249         if (rps->max_freq_softlimit < rps->max_freq)
2250                 rps->max_freq_softlimit++;
2251         spin_unlock_irq(&mchdev_lock);
2252
2253         drm_dev_put(&i915->drm);
2254         return true;
2255 }
2256 EXPORT_SYMBOL_GPL(i915_gpu_raise);
2257
2258 /**
2259  * i915_gpu_lower - lower GPU frequency limit
2260  *
2261  * IPS indicates we're close to a thermal limit, so throttle back the GPU
2262  * frequency maximum.
2263  */
2264 bool i915_gpu_lower(void)
2265 {
2266         struct drm_i915_private *i915;
2267         struct intel_rps *rps;
2268
2269         i915 = mchdev_get();
2270         if (!i915)
2271                 return false;
2272
2273         rps = &i915->gt.rps;
2274
2275         spin_lock_irq(&mchdev_lock);
2276         if (rps->max_freq_softlimit > rps->min_freq)
2277                 rps->max_freq_softlimit--;
2278         spin_unlock_irq(&mchdev_lock);
2279
2280         drm_dev_put(&i915->drm);
2281         return true;
2282 }
2283 EXPORT_SYMBOL_GPL(i915_gpu_lower);
2284
2285 /**
2286  * i915_gpu_busy - indicate GPU busyness to IPS
2287  *
2288  * Tell the IPS driver whether or not the GPU is busy.
2289  */
2290 bool i915_gpu_busy(void)
2291 {
2292         struct drm_i915_private *i915;
2293         bool ret;
2294
2295         i915 = mchdev_get();
2296         if (!i915)
2297                 return false;
2298
2299         ret = i915->gt.awake;
2300
2301         drm_dev_put(&i915->drm);
2302         return ret;
2303 }
2304 EXPORT_SYMBOL_GPL(i915_gpu_busy);
2305
2306 /**
2307  * i915_gpu_turbo_disable - disable graphics turbo
2308  *
2309  * Disable graphics turbo by resetting the max frequency and setting the
2310  * current frequency to the default.
2311  */
2312 bool i915_gpu_turbo_disable(void)
2313 {
2314         struct drm_i915_private *i915;
2315         struct intel_rps *rps;
2316         bool ret;
2317
2318         i915 = mchdev_get();
2319         if (!i915)
2320                 return false;
2321
2322         rps = &i915->gt.rps;
2323
2324         spin_lock_irq(&mchdev_lock);
2325         rps->max_freq_softlimit = rps->min_freq;
2326         ret = !__gen5_rps_set(rps, rps->min_freq);
2327         spin_unlock_irq(&mchdev_lock);
2328
2329         drm_dev_put(&i915->drm);
2330         return ret;
2331 }
2332 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
2333
2334 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2335 #include "selftest_rps.c"
2336 #include "selftest_slpc.c"
2337 #endif