/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */
#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"
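/*
 * The counters below are consumed from userspace through the perf uAPI.
 * As a rough sketch, assuming a single integrated GPU whose PMU registers
 * under the name "i915", a system-wide readout looks like:
 *
 *   perf stat -e i915/rc6-residency/ -e i915/actual-frequency/ -a sleep 1
 *
 * Event and unit names are generated in create_event_attributes() below.
 */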
/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
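/* With FREQUENCY == 200 this evaluates to max(10000, 5000000) ns, i.e. 5 ms. */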
#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}
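/*
 * Layout of the pmu->enable bitmask, as derived from the helpers above:
 * bits [0, ENGINE_SAMPLE_BITS) mirror the engine sample types
 * (busy/wait/sema), while the "other" events are packed above them at
 * ENGINE_SAMPLE_BITS + n. For example I915_PMU_REQUESTED_FREQUENCY,
 * which is __I915_PMU_OTHER(1), maps to bit ENGINE_SAMPLE_BITS + 1.
 */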
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}
#if IS_ENABLED(CONFIG_PM)

static inline s64 ktime_since(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get(), kt));
}
static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}
static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);

	pmu->sleep_last = ktime_get();
}

#else

static u64 get_rc6(struct intel_gt *gt)
{
	return __get_rc6(gt);
}

static void park_rc6(struct drm_i915_private *i915) {}

#endif
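/*
 * The !CONFIG_PM variants above can read the hardware directly: without
 * runtime PM the device is never suspended behind our back, so no
 * sleep-time estimation is needed.
 */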
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}
static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return IS_GEN(i915, 7);
}
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		struct intel_engine_pmu *pmu = &engine->pmu;
		spinlock_t *mmio_lock;
		unsigned long flags;
		bool busy;
		u32 val;

		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		mmio_lock = NULL;
		if (exclusive_mmio_access(i915))
			mmio_lock = &engine->uncore->lock;

		if (unlikely(mmio_lock))
			spin_lock_irqsave(mmio_lock, flags);

		val = ENGINE_READ_FW(engine, RING_CTL);
		if (val == 0) /* powerwell off => engine idle */
			goto skip;

		if (val & RING_WAIT)
			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
		if (val & RING_WAIT_SEMAPHORE)
			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

		/* No need to sample when busy stats are supported. */
		if (intel_engine_supports_stats(engine))
			goto skip;

		/*
		 * While waiting on a semaphore or event, MI_MODE reports the
		 * ring as idle. However, previously using the seqno, and with
		 * execlists sampling, we account for the ring waiting as the
		 * engine being busy. Therefore, we record the sample as being
		 * busy if either waiting or !idle.
		 */
		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
		if (!busy) {
			val = ENGINE_READ_FW(engine, RING_MI_MODE);
			busy = !(val & MODE_IDLE);
		}
		if (busy)
			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);

skip:
		if (unlikely(mmio_lock))
			spin_unlock_irqrestore(mmio_lock, flags);
		intel_engine_pm_put_async(engine);
	}
}
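/*
 * Note the engine sampling above is statistical: each timer tick
 * attributes the whole period_ns to whatever state the engine was
 * observed in at that instant, so activity shorter than the sampling
 * period may be over- or under-accounted.
 */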
static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}
static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
}
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(rps, rps->cur_freq),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
static void engine_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915,
					  engine_event_class(event),
					  engine_event_instance(event));
	if (WARN_ON_ONCE(!engine))
		return;

	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
	    intel_engine_supports_stats(engine))
		intel_disable_engine_stats(engine);
}
static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);

	if (is_engine_event(event))
		engine_event_destroy(event);
}
static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;
	u8 sample;
	int ret;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	sample = engine_event_sample(event);
	ret = engine_event_status(engine, sample);
	if (ret)
		return ret;

	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
		ret = intel_enable_engine_stats(engine);

	return ret;
}
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		}
	}

	return val;
}
static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
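/*
 * The cmpxchg loop above makes the read-and-update of prev_count
 * lock-free: if a concurrent reader published a newer snapshot first,
 * we retry so that each delta is added to event->count exactly once.
 */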
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	intel_wakeref_t wakeref;
	unsigned long flags;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	if (pmu->enable_count[bit] == 0 &&
	    config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sleep_last = ktime_get();
	}

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
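/*
 * Note: there is no per-event hardware counter index to expose here,
 * hence the constant 0 (matching perf_event_idx_default), which also
 * means the rdpmc self-monitoring path does not apply to this PMU.
 */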
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};
struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}
static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL,
};
#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}
static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}
static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}
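/*
 * Note the designated reader above only migrates within the offlined
 * CPU's sibling (hyperthread) mask; if no sibling is online the events
 * are not migrated.
 */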
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &pmu->node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
	cpuhp_remove_multi_state(cpuhp_slot);
}
static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	int ret = -ENOMEM;

	if (INTEL_GEN(i915) <= 2) {
		dev_info(i915->drm.dev, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
	if (!i915_pmu_events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups	= i915_pmu_attr_groups;
	pmu->base.task_ctx_nr	= perf_invalid_context;
	pmu->base.event_init	= i915_pmu_event_init;
	pmu->base.add		= i915_pmu_event_add;
	pmu->base.del		= i915_pmu_event_del;
	pmu->base.start		= i915_pmu_event_start;
	pmu->base.stop		= i915_pmu_event_stop;
	pmu->base.read		= i915_pmu_event_read;
	pmu->base.event_idx	= i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_attr;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	WARN_ON(pmu->enable);

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}