/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}
enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}
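
/*
 * A worked example of the pacing above (just the loop arithmetic, not from
 * any spec): udelay(10 * pass) grows from 10us on the first pass to 100us
 * on the tenth, so a full run of all ten passes spends roughly 550us in
 * udelay() alone, on top of the ack polling in wait_ack_set()/wait_ack_clear().
 */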
static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}
static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		  "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/* On VLV, the FIFO is shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time. */
	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}
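
/*
 * Caller-side sketch of what the FIFO rule above means for raw register
 * writes on gen6/gen7 (the regular write path does this automatically via
 * __gen6_write() further down; see also the NOTE on
 * intel_uncore_forcewake_for_reg() at the end of this file):
 *
 *	__gen6_gt_wait_for_fifo(uncore);	(reserve a free slot)
 *	__raw_uncore_write32(uncore, reg, val);	(the posted write consumes it)
 */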
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}
static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		DRM_DEBUG_DRIVER("GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}
static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
					  unsigned int restore_forcewake)
{
	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(uncore))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}
void intel_uncore_suspend(struct intel_uncore *uncore)
{
	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	__intel_uncore_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down particular
 * forcewake domains this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	__assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
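
/*
 * Example usage (a sketch of the pattern the kernel-doc above describes):
 * keep the render powerwell up across a raw register sequence, with a
 * symmetric put at the end:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... raw intel_uncore_read_fw()/intel_uncore_write_fw() sequence ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */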
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake.count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);

		/* Save and disable mmio debugging for the user bypass */
		uncore->user_forcewake.saved_mmio_check =
			uncore->unclaimed_mmio_check;
		uncore->user_forcewake.saved_mmio_debug =
			i915_modparams.mmio_debug;

		uncore->unclaimed_mmio_check = 0;
		i915_modparams.mmio_debug = 0;
	}
	spin_unlock_irq(&uncore->lock);
}
/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake.count) {
		if (intel_uncore_unclaimed_mmio(uncore))
			dev_info(uncore_to_i915(uncore)->drm.dev,
				 "Invalid mmio detected during user access\n");

		uncore->unclaimed_mmio_check =
			uncore->user_forcewake.saved_mmio_check;
		i915_modparams.mmio_debug =
			uncore->user_forcewake.saved_mmio_debug;

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
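
/*
 * The user_get/user_put pair is reference counted, so nested bypasses are
 * fine; only the first get and the last put toggle the mmio-debug state. A
 * sketch of a typical bypass (the actual call sites live outside this file):
 *
 *	intel_uncore_forcewake_user_get(uncore);
 *	... userspace pokes registers directly ...
 *	intel_uncore_forcewake_user_put(uncore);
 */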
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	WARN(uncore->fw_domains_active,
	     "Expected all fw_domains to be inactive, but %08x are still on\n",
	     uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	if (!uncore->funcs.force_wake_get)
		return;

	__assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	WARN(fw_domains & ~uncore->fw_domains_active,
	     "Expected %08x fw_domains to be active, but %08x are off\n",
	     fw_domains, fw_domains & ~uncore->fw_domains_active);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define GEN11_NEEDS_FORCE_WAKE(reg) \
	((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
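
/*
 * BSEARCH evaluates to a pointer to the matching element, or NULL when no
 * element compares equal; find_fw_domain() just below is the canonical
 * user, e.g.:
 *
 *	entry = BSEARCH(offset, uncore->fw_domains_table,
 *			uncore->fw_domains_table_entries, fw_range_cmp);
 */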
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	WARN(entry->domains & ~uncore->fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
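
/*
 * A worked lookup against the table above (a sketch): offset 0x12080 falls
 * in the 0x12000-0x13fff range, so find_fw_domain() returns FORCEWAKE_MEDIA;
 * offset 0x4000 sits in the gap between ranges, matches no entry and needs
 * no forcewake at all.
 */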
#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)
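
/*
 * Example (a sketch): is_gen8_shadowed(0xa008) is true because GEN6_RPNSWREQ
 * sits in gen8_shadowed_regs, so writes to it skip the forcewake resolution
 * in __gen8_reg_write_fw_domains() below, while a non-shadowed offset in the
 * same range still takes the normal forcewake path.
 */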
#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};
static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(uncore) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	__unclaimed_reg_debug(uncore, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	__assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	__assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}
#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)
#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)

__gen11_fwtable_read(8)
__gen11_fwtable_read(16)
__gen11_fwtable_read(32)
__gen11_fwtable_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen11_fwtable_read
#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)
#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)

__gen11_fwtable_write(8)
__gen11_fwtable_write(16)
__gen11_fwtable_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __gen11_fwtable_write
#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
static void fw_domain_init(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &uncore->fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &uncore->fw_domain[domain_id];

	WARN_ON(d->wake_count);
	WARN_ON(hrtimer_cancel(&d->timer));
	memset(d, 0, sizeof(*d));

	uncore->fw_domains &= ~BIT(domain_id);
}
static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	if (!intel_uncore_has_forcewake(uncore))
		return;

	if (INTEL_GEN(i915) >= 11) {
		int i;

		uncore->funcs.force_wake_get =
			fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(uncore->fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access to the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(dev_priv);
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(dev_priv);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		DRM_ERROR("failed to map registers\n");
		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	struct pci_dev *pdev = i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore)
{
	spin_lock_init(&uncore->lock);
}
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	i915_check_vgpu(i915);

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	intel_uncore_fw_domains_init(uncore);
	__intel_uncore_early_sanitize(uncore, 0);

	uncore->unclaimed_mmio_check = 1;
	uncore->pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	uncore->rpm = &i915->runtime_pm;

	if (!intel_uncore_has_forcewake(uncore)) {
		if (IS_GEN(i915, 5)) {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
		}
	} else if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	}

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}
/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);

	if (INTEL_GEN(i915) >= 11) {
		enum forcewake_domains fw_domains = uncore->fw_domains;
		enum forcewake_domain_id domain_id;
		int i;

		for (i = 0; i < I915_MAX_VCS; i++) {
			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

			if (HAS_ENGINE(i915, _VCS(i)))
				continue;

			if (fw_domains & BIT(domain_id))
				fw_domain_fini(uncore, domain_id);
		}

		for (i = 0; i < I915_MAX_VECS; i++) {
			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

			if (HAS_ENGINE(i915, _VECS(i)))
				continue;

			if (fw_domains & BIT(domain_id))
				fw_domain_fini(uncore, domain_id);
		}
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(uncore_to_i915(uncore));

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
	uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 11),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = I915_READ64_2x32(entry->offset_ldw,
						    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = I915_READ64(entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = I915_READ(entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = I915_READ16(entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = I915_READ8(entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
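
/*
 * Example usage (a sketch; the register and mask are purely illustrative):
 * poll for up to 100us in atomic context for the GT threads to idle, with
 * forcewake already held by the caller:
 *
 *	err = __intel_wait_for_register_fw(uncore, GEN6_GT_THREAD_STATUS_REG,
 *					   GEN6_GT_THREAD_STATUS_CORE_MASK, 0,
 *					   100, 0, NULL);
 */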
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	return check_for_unclaimed_mmio(uncore);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->lock);

	if (unlikely(uncore->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			DRM_DEBUG("Unclaimed register detected, "
				  "enabling oneshot unclaimed register reporting. "
				  "Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->lock);

	return ret;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
				i915_reg_t reg)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
	} else if (HAS_FWTABLE(i915)) {
		fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
	} else if (INTEL_GEN(i915) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
	} else {
		/* on devices with FW we expect to hit one of the above cases */
		if (intel_uncore_has_forcewake(uncore))
			MISSING_CASE(INTEL_GEN(i915));

		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
				 i915_reg_t reg)
{
	struct drm_i915_private *i915 = uncore_to_i915(uncore);
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
	} else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
		fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
	} else if (IS_GEN(i915, 8)) {
		fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
	} else if (IS_GEN_RANGE(i915, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		/* on devices with FW we expect to hit one of the above cases */
		if (intel_uncore_has_forcewake(uncore))
			MISSING_CASE(INTEL_GEN(i915));

		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~uncore->fw_domains);

	return fw_domains;
}
/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * 				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * the callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);

	return fw_domains;
}
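
/*
 * Typical pattern built on the helper above (a sketch; this is essentially
 * what __intel_wait_for_register() does with the __locked variants):
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(uncore, fw);
 *	... raw intel_uncore_read_fw()/intel_uncore_write_fw() accesses ...
 *	intel_uncore_forcewake_put(uncore, fw);
 */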
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif