/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
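
/*
 * Events are specified to perf through attr.config. Engine events pack the
 * engine class, instance and sample type (busy/wait/sema) into the config
 * value, while all other events live above the highest possible engine
 * encoding (see is_engine_config() below).
 *
 * As an illustrative sketch, assuming the field layout from the uapi header
 * (sample in the low I915_PMU_SAMPLE_BITS, class at I915_PMU_CLASS_SHIFT),
 * the "busy" counter for engine class 0, instance 0 would be:
 *
 *	config = (0 << I915_PMU_CLASS_SHIFT) |
 *		 (0 << I915_PMU_SAMPLE_BITS) |
 *		 I915_SAMPLE_BUSY;
 */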
static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}
static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}
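
/*
 * Read the raw RC6 residency from hardware. Where the deeper RC6p/RC6pp
 * states exist they are accumulated into the same total, since the PMU
 * exposes a single rc6-residency counter.
 */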
static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6,
				     IS_VALLEYVIEW(i915) ?
				     VLV_GT_RENDER_RC6 :
				     GEN6_GT_GFX_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

	return val;
}
#if IS_ENABLED(CONFIG_PM)

static inline s64 ktime_since(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get(), kt));
}
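
/*
 * With runtime pm, reading RC6 must not wake the device. While suspended the
 * residency is estimated as the last hardware value plus the wall time slept
 * since park_rc6() stamped pmu->sleep_last, and the reported value is clamped
 * so it never moves backwards between reads.
 */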
static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		pmu->sample[__I915_SAMPLE_RC6].cur = val;
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since(pmu->sleep_last);
		val += pmu->sample[__I915_SAMPLE_RC6].cur;
	}

	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
	else
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}
static void park_rc6(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);

	pmu->sleep_last = ktime_get();
}
#else

static u64 get_rc6(struct intel_gt *gt)
{
	return __get_rc6(gt);
}

static void park_rc6(struct drm_i915_private *i915) {}

#endif
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}
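
/*
 * Park/unpark notifications from GT power management. They gate the sampling
 * timer so that it only runs while there is something to observe.
 */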
void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(i915);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * the GPU went idle.
	 */
	pmu->timer_enabled = pmu_needs_timer(pmu, false);

	spin_unlock_irq(&pmu->lock);
}
void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	spin_unlock_irq(&pmu->lock);
}
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}
static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return IS_GEN(i915, 7);
}
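
/*
 * Sampling is pure accumulation: each state observed during a tick adds the
 * whole period_ns to the respective counter, so counters are in nanoseconds
 * and directly comparable with the software busy-stats path.
 */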
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}
static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
	sample->cur += mul_u32_u32(val, mul);
}
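
/*
 * Frequency samples accumulate in MHz * usec units: each tick adds the
 * frequency in MHz multiplied by period_ns / 1000. For example, one 5ms
 * period (PERIOD with the default 200Hz FREQUENCY) at 1400 MHz adds
 * 1400 * 5000 = 7,000,000. The read side divides by USEC_PER_SEC, so the
 * counter advances by the time-weighted average frequency in MHz.
 */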
static bool frequency_sampling_enabled(struct i915_pmu *pmu)
{
	return pmu->enable &
	       (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY));
}
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
		if (val)
			val = intel_rps_get_cagf(rps, val);
		else
			val = rps->cur_freq;

		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
				intel_gpu_freq(rps, val), period_ns / 1000);
	}

	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
				intel_gpu_freq(rps, rps->cur_freq),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_gt *gt = &i915->gt;
	unsigned int period_ns;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
	engines_sample(gt, period_ns);
	frequency_sample(gt, period_ns);

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}
static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}
static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}
static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}
static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}
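
/*
 * Once registered, the counters are consumed through the normal perf
 * interfaces, e.g. (assuming the event names exposed under
 * /sys/bus/event_source/devices/i915/events/ on the running system):
 *
 *	perf stat -e i915/rc6-residency/ -e i915/rcs0-busy/ -a sleep 1
 */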
static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(&i915->gt);
			break;
		}
	}

	return val;
}
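
/*
 * Readers can race with the sampling timer and with each other, so the
 * previously reported value is republished with a lockless cmpxchg loop and
 * only the delta is accumulated into the perf event count.
 */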
static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}
static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	intel_wakeref_t wakeref;
	unsigned long flags;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	if (pmu->enable_count[bit] == 0 &&
	    config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
		pmu->sleep_last = ktime_get();
	}

	pmu->enable |= BIT_ULL(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT_ULL(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);

	event->hw.state = PERF_HES_STOPPED;
}
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}
struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};
static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};
struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};
static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}
static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
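
/*
 * Construct the "events" attribute group from whatever counters the running
 * hardware supports. As a sketch, the resulting sysfs entries typically look
 * like (names depend on the uabi engines actually present):
 *
 *	/sys/bus/event_source/devices/i915/events/rcs0-busy
 *	/sys/bus/event_source/devices/i915/events/rc6-residency
 *	/sys/bus/event_source/devices/i915/events/actual-frequency.unit
 */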
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}
static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}
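
/*
 * perf treats this as an uncore PMU: events are read from a single designated
 * CPU. The hotplug callbacks below maintain i915_pmu_cpumask and migrate the
 * perf context when the current reader CPU goes offline.
 */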
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}
static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

void i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}
static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}
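
/*
 * The integrated GPU keeps the historical "i915" PMU name; other devices get
 * an "i915_<pci-address>" name, with colons replaced since tools/perf treats
 * them as separators.
 */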
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (INTEL_GEN(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module	= THIS_MODULE;
	pmu->base.task_ctx_nr	= perf_invalid_context;
	pmu->base.event_init	= i915_pmu_event_init;
	pmu->base.add		= i915_pmu_event_add;
	pmu->base.del		= i915_pmu_event_del;
	pmu->base.start		= i915_pmu_event_start;
	pmu->base.stop		= i915_pmu_event_stop;
	pmu->base.read		= i915_pmu_event_read;
	pmu->base.event_idx	= i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}
void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic, the
	 * synchronize_rcu() below ensures all currently executing ones will
	 * have exited before we proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}