/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <asm/iosf_mbi.h>

#include "intel_sideband.h"

#include "i915_drv.h"
#include "intel_drv.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07

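/*
 * The callback passed to on_each_cpu() below is deliberately empty: the
 * IPI itself is presumed to be enough to kick each core out of its
 * current sleep state, so that the pm_qos limit set in __vlv_punit_get()
 * takes effect immediately.
 */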
static void ping(void *info)
{
}

static void __vlv_punit_get(struct drm_i915_private *i915)
{
	iosf_mbi_punit_acquire();

	/*
	 * Prevent the cpu from sleeping while we use this sideband, otherwise
	 * the punit may cause a machine hang. The issue appears to be isolated
	 * to changing the power state of the CPU package while changing
	 * the power state via the punit, and we have only observed it
	 * reliably on 4-core Baytrail systems, suggesting the issue is in the
	 * power delivery mechanism and likely to be board/function
	 * specific. Hence we presume the workaround needs only be applied
	 * to the Valleyview P-unit and not all sideband communications.
	 */
	if (IS_VALLEYVIEW(i915)) {
		pm_qos_update_request(&i915->sb_qos, 0);
		on_each_cpu(ping, NULL, 1);
	}
}

static void __vlv_punit_put(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);

	iosf_mbi_punit_release();
}

void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
{
	if (ports & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_get(i915);

	mutex_lock(&i915->sb_lock);
}

void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
	mutex_unlock(&i915->sb_lock);

	if (ports & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_put(i915);
}

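/*
 * Usage sketch (assuming the VLV_IOSF_SB_* enum from intel_sideband.h;
 * PUNIT_REG_GPU_FREQ_STS merely stands in for whichever register the
 * caller wants): bracket a sequence of sideband accesses with
 * vlv_iosf_sb_get()/vlv_iosf_sb_put(), naming each port to be touched so
 * the punit wakeref is only taken when needed:
 *
 *	vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT));
 *	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
 *	vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
 */
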
static int vlv_sideband_rw(struct drm_i915_private *i915,
			   u32 devfn, u32 port, u32 opcode,
			   u32 addr, u32 *val)
{
	struct intel_uncore *uncore = &i915->uncore;
	const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
	int err;

	lockdep_assert_held(&i915->sb_lock);
	if (port == IOSF_PORT_PUNIT)
		iosf_mbi_assert_punit_acquired();

	/* Flush the previous comms, just in case it failed last time. */
	if (intel_wait_for_register(uncore,
				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
				    5)) {
		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
				 is_read ? "read" : "write");
		return -EAGAIN;
	}

	preempt_disable();

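	/*
	 * Program the target address and, for a write, the payload, then
	 * ring the doorbell with the fully encoded request and wait for
	 * the busy bit to clear.
	 */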
	intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
	intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
	intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
			      (devfn << IOSF_DEVFN_SHIFT) |
			      (opcode << IOSF_OPCODE_SHIFT) |
			      (port << IOSF_PORT_SHIFT) |
			      (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			      (0 << IOSF_BAR_SHIFT) |
			      IOSF_SB_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
					 10000, 0, NULL) == 0) {
		if (is_read)
			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
		err = 0;
	} else {
		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
				 is_read ? "read" : "write");
		err = -ETIMEDOUT;
	}

	preempt_enable();

	return err;
}

u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			SB_CRRDDA_NP, addr, &val);

	return val;
}

int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
{
	return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			       SB_CRWRDA_NP, addr, &val);
}

u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
			SB_CRRDDA_NP, addr, &val);

	return val;
}

u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_iosf_sb_write(struct drm_i915_private *i915,
		       u8 port, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
	int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
	u32 val = 0;

	vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);

	/*
	 * FIXME: There might be some registers where all 1's is a valid value,
	 * so ideally we should check the register offset instead...
	 */
	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
	     pipe_name(pipe), reg, val);

	return val;
}

void vlv_dpio_write(struct drm_i915_private *i915,
		    enum pipe pipe, int reg, u32 val)
{
	int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];

	vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
}

u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
			reg, &val);
	return val;
}

void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
			reg, &val);
}

/* SBI access */
static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
			enum intel_sbi_destination destination,
			u32 *val, bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 cmd;

	lockdep_assert_held(&i915->sb_lock);

	if (intel_wait_for_register_fw(uncore,
				       SBI_CTL_STAT, SBI_BUSY, 0,
				       100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return -EBUSY;
	}

	intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
	intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);

	if (destination == SBI_ICLK)
		cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
	else
		cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
	if (!is_read)
		cmd |= BIT(8);
	intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 SBI_CTL_STAT, SBI_BUSY, 0,
					 100, 100, &cmd)) {
		DRM_ERROR("timeout waiting for SBI to complete read\n");
		return -ETIMEDOUT;
	}

	if (cmd & SBI_RESPONSE_FAIL) {
		DRM_ERROR("error during SBI read of reg %x\n", reg);
		return -ENXIO;
	}

	if (is_read)
		*val = intel_uncore_read_fw(uncore, SBI_DATA);

	return 0;
}

u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
		   enum intel_sbi_destination destination)
{
	u32 result = 0;

	intel_sbi_rw(i915, reg, destination, &result, true);

	return result;
}

void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
		     enum intel_sbi_destination destination)
{
	intel_sbi_rw(i915, reg, destination, &value, false);
}

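/*
 * Usage sketch: SBI accesses reach the PCH iCLK/MPHY units and, like the
 * VLV sideband, must happen under sb_lock. SBI_SSCDIVINTPHASE6 is just a
 * stand-in register here:
 *
 *	mutex_lock(&i915->sb_lock);
 *	tmp = intel_sbi_read(i915, SBI_SSCDIVINTPHASE6, SBI_ICLK);
 *	intel_sbi_write(i915, SBI_SSCDIVINTPHASE6, tmp, SBI_ICLK);
 *	mutex_unlock(&i915->sb_lock);
 */
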
static inline int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
				  u32 mbox, u32 *val, u32 *val1,
				  int fast_timeout_us,
				  int slow_timeout_ms,
				  bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;

	lockdep_assert_held(&i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, so we can
	 * use the _fw register accessors to reduce the amount of work
	 * required when reading/writing.
	 */
	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (INTEL_GEN(i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}

int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
			   u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __sandybridge_pcode_rw(i915, mbox, val, val1,
				     500, 0,
				     true);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		DRM_DEBUG_DRIVER("warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
				 mbox, __builtin_return_address(0), err);
	}

	return err;
}

int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
				    u32 mbox, u32 val,
				    int fast_timeout_us,
				    int slow_timeout_ms)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
				     fast_timeout_us, slow_timeout_ms,
				     false);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		DRM_DEBUG_DRIVER("warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
				 val, mbox, __builtin_return_address(0), err);
	}

	return err;
}

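/*
 * Usage sketch: a read fills @val (and @val1 when the mailbox returns a
 * second dword), a write sends one dword; both return 0 or a negative
 * error translated from the mailbox status. GEN6_READ_OC_PARAMS is just
 * a stand-in mailbox ID:
 *
 *	err = sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, &val, NULL);
 *	err = sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0);
 */
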
static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
					 500, 0,
					 true);

	return *status || ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @i915: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&i915->sb_lock);

#define COND \
	skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&i915->sb_lock);
	return ret ? ret : status;
#undef COND
}
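
/*
 * Usage sketch, mirroring the cdclk code: ask PCODE to prepare for a
 * frequency change and poll until it reports ready:
 *
 *	ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 */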