/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>
#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vebox0",
	"vebox1",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

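/*
 * The fw_set()/fw_clear() helpers below rely on the "masked" layout of the
 * forcewake request registers: the upper 16 bits select which of the lower
 * 16 bits the write actually affects (see _MASKED_BIT_ENABLE/_DISABLE), so
 * individual domains can be requested or released without a
 * read-modify-write cycle.
 */
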
#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here exists at this point (engines could be fused
	 * off in ICL+), so no waiting for acks.
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

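/*
 * Note that fw_domain_arm_timer() takes a wake_count reference of its own
 * (the d->wake_count++ above); intel_uncore_fw_release_timer() further down
 * drops it again once the domain has stayed idle for a full timer period.
 */
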
static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of the driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

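/*
 * Delayed-release half of the auto forcewake machinery: if the domain was
 * used again while the timer was pending (domain->active), give it another
 * period; otherwise drop the timer's wake_count reference and, when that was
 * the last reference, power the domain back down.
 */
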
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		uncore->funcs.force_wake_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static enum forcewake_domains
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/*
	 * Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		uncore->funcs.force_wake_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		uncore->funcs.force_wake_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		uncore->funcs.force_wake_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to grab the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

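/*
 * Typical usage (a sketch, not lifted from a real caller): keep the render
 * well awake across a multi-register sequence and then release it:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... a burst of intel_uncore_read_fw()/intel_uncore_write_fw() ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */
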
/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			drm_info(&uncore->i915->drm,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewakes for the specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->funcs.force_wake_put)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->funcs.force_wake_put)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->funcs.force_wake_get)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->funcs.force_wake_get)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

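/*
 * The ({ ... }) statement-expression form lets these lookups expand inline
 * inside the generated accessors below. On gen6/7 everything below the
 * 0x40000 cut-off is simply assumed to sit behind the render well; offsets
 * at or above it are assumed not to need forcewake at all.
 */
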
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})

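/*
 * Usage sketch (hypothetical offset) for the range-table lookup done in
 * find_fw_domain() below:
 *
 *	const struct intel_forcewake_range *entry =
 *		BSEARCH(0xb100, uncore->fw_domains_table,
 *			uncore->fw_domains_table_entries, fw_range_cmp);
 *
 * A NULL result means no range claims the offset, i.e. no forcewake is
 * needed for it.
 */
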
static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(INTEL_GEN(dev_priv) >= 9 || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
	find_fw_domain(uncore, offset)

#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
	find_fw_domain(uncore, offset)

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

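/*
 * Shadowed registers (here the ring tails plus a couple of frequency
 * request registers) are tracked by the hardware on our behalf, so writes
 * to them do not need the power well awake; the !is_genX_shadowed() tests
 * in the write-domain macros below skip the forcewake dance for them.
 */
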
static const i915_reg_t gen11_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static const i915_reg_t gen12_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),		/* 0x2000 (base) */
	GEN6_RPNSWREQ,				/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,			/* 0xA00C */
	RING_TAIL(BLT_RING_BASE),		/* 0x22000 (base) */
	RING_TAIL(GEN11_BSD_RING_BASE),		/* 0x1C0000 (base) */
	RING_TAIL(GEN11_BSD2_RING_BASE),	/* 0x1C4000 (base) */
	RING_TAIL(GEN11_VEBOX_RING_BASE),	/* 0x1C8000 (base) */
	RING_TAIL(GEN11_BSD3_RING_BASE),	/* 0x1D0000 (base) */
	RING_TAIL(GEN11_BSD4_RING_BASE),	/* 0x1D4000 (base) */
	RING_TAIL(GEN11_VEBOX2_RING_BASE),	/* 0x1D8000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

#define __is_genX_shadowed(x) \
static bool is_gen##x##_shadowed(u32 offset) \
{ \
	const i915_reg_t *regs = gen##x##_shadowed_regs; \
	return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
		       mmio_reg_cmp); \
}

__is_genX_shadowed(8)
__is_genX_shadowed(11)
__is_genX_shadowed(12)

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (!is_gen11_shadowed(__offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (!is_gen12_shadowed(__offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3),
	GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/*
	 * WaIssueDummyWriteToWakeupFromRC6:ilk
	 * Issue a dummy write to wake up the chip from rc6 before touching it
	 * for real. MI_MODE is masked, hence harmless to write 0 into.
	 */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore) && !before,
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		i915_modparams.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915_modparams.mmio_debug))
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before)
		spin_lock(&uncore->debug->lock);

	__unclaimed_reg_debug(uncore, reg, read, before);

	if (!before)
		spin_unlock(&uncore->debug->lock);
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	uncore->funcs.force_wake_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}

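/*
 * This is the heart of the implicit forcewake path used by the generated
 * mmio accessors: any requested domain that is not already awake is powered
 * up and its reference immediately handed to the release timer, so plain
 * register reads and writes never manage wake references explicitly.
 */
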
#define __gen_read(func, x) \
static u##x \
func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN6_READ_FOOTER; \
}

#define __gen_reg_read_funcs(func) \
static enum forcewake_domains \
func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_read(func, 8) \
__gen_read(func, 16) \
__gen_read(func, 32) \
__gen_read(func, 64)

__gen_reg_read_funcs(gen12_fwtable);
__gen_reg_read_funcs(gen11_fwtable);
__gen_reg_read_funcs(fwtable);
__gen_reg_read_funcs(gen6);

#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_write(func, x) \
static void \
func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

#define __gen_reg_write_funcs(func) \
static enum forcewake_domains \
func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
	return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
} \
\
__gen_write(func, 8) \
__gen_write(func, 16) \
__gen_write(func, 32)

__gen_reg_write_funcs(gen12_fwtable);
__gen_reg_write_funcs(gen11_fwtable);
__gen_reg_write_funcs(fwtable);
__gen_reg_write_funcs(gen8);

#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (INTEL_GEN(i915) >= 11) {
		int i;

		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!HAS_ENGINE(i915, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!HAS_ENGINE(i915, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->funcs.force_wake_get = fw_domains_get;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN(i915, 6)) {
		uncore->funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		uncore->funcs.force_wake_put = fw_domains_put;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
			struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding a RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

static int uncore_mmio_setup(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = i915->drm.pdev;
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_GEN(i915) < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}

static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = uncore->i915->drm.pdev;

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
	uncore->debug = &i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (IS_GEN(uncore->i915, 5)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	if (IS_GEN_RANGE(i915, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);

		if (IS_VALLEYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN(i915, 8)) {
		if (IS_CHERRYVIEW(i915)) {
			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
		}
	} else if (IS_GEN_RANGE(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GEN(i915, 11)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
	} else {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
		ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	ret = uncore_mmio_setup(uncore);
	if (ret)
		return ret;

	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			goto out_mmio_cleanup;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GEN_RANGE(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;

out_mmio_cleanup:
	uncore_mmio_cleanup(uncore);

	return ret;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(i915, _VCS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(i915, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}

	uncore_mmio_cleanup(uncore);
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u16 gen_mask;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.gen_mask = INTEL_GEN_MASK(4, 12),
	.size = 8
} };

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (INTEL_INFO(i915)->gen_mask & entry->gen_mask &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must be not larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 uninitialized_var(reg_value);
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

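/*
 * Example (a sketch mirroring the gt reset code, not a new API): spin for up
 * to 100us, with forcewake already held by the caller, until the full
 * soft-reset bit clears:
 *
 *	err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
 *					   GEN6_GRDOM_FULL, 0,
 *					   100, 0, NULL);
 */
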
/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!i915_modparams.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			i915_modparams.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

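/*
 * Example pairing (a sketch): resolve the needed domains once, then do a
 * batch of raw accesses under an explicit, locked wakeref, much as
 * __intel_wait_for_register() above does:
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	... raw register accesses ...
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */
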
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif