/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

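/* Acknowledge the message currently latched in the receive mailbox: writing 2
 * sets the RCV_MSG_ACK bit in the VF's mailbox control byte so the host (PF)
 * knows the VF has consumed it.
 */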
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

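/* Mark the outgoing message in TRN_DW0..DW3 as valid (or clear it) by toggling
 * bit 0 of the VF's transmit mailbox control byte.
 */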
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL is already
 * guaranteed to be set to 1.
 *
 * If called outside the IRQ routine, peek_msg cannot be guaranteed to return
 * the correct value, since RCV_DW0 only holds a valid message while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
}

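/* Check whether the message currently in the receive mailbox matches the
 * expected event; if it does, acknowledge it to the host.
 */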
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

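/* Non-blocking read of the TRN_MSG_ACK bit (bit 1 of the transmit mailbox
 * control byte).
 */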
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

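/* Busy-wait for the host to acknowledge the message we transmitted, giving up
 * after NV_MAILBOX_POLL_ACK_TIMEDOUT msec.
 */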
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

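/* Poll the receive mailbox for a specific event from the host, giving up after
 * NV_MAILBOX_POLL_MSG_TIMEDOUT msec.
 */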
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

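/* Transmit a request to the host: clear any stale ack, write the request and
 * its three data words into TRN_DW0..DW3, raise TRN_MSG_VALID and then wait
 * for the host's ack.
 */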
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
	      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with that
	 * cleared, the hardware automatically clears the host's RCV_MSG_ACK,
	 * which in turn clears the VF's TRN_MSG_ACK. Otherwise the
	 * xgpu_nv_poll_ack() below would return immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

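/* Send an access request to the host and, for requests that expect a reply,
 * wait for IDH_READY_TO_ACCESS_GPU before returning.
 */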
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is one of the GPU access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

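/* Ask the host for exclusive ("full") access to the GPU, either for driver
 * init or for driver fini depending on @init.
 */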
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

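/* Enable or disable the mailbox ACK interrupt by programming ACK_INT_EN in
 * BIF_BX_PF_MAILBOX_INT_CNTL.
 */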
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);

	return 0;
}

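/* Deferred work scheduled from the receive interrupt when the host announces a
 * function level reset (FLR) of this VF: wait for FLR completion and then, if
 * no per-engine TDR is configured, kick off full GPU recovery.
 */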
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* Block amdgpu_gpu_recover until the FLR COMPLETE message is received,
	 * otherwise the mailbox message would be ruined/reset by the VF FLR.
	 *
	 * We unlock lock_reset again afterwards to allow "amdgpu_job_timedout"
	 * to run gpu_recover() once FLR_NOTIFICATION_CMPL has been received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
	    && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);

	return 0;
}

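/* Receive interrupt handler: peek at the incoming message and schedule the FLR
 * work item when the host notifies us of a function level reset.
 */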
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ handler
	 * can ignore it here since the polling thread will handle it; other
	 * messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

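/* Register the mailbox interrupt sources with the BIF IH client: src id 135 is
 * the "message valid" (receive) interrupt and 138 is the "ack" interrupt.
 */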
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

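/* Virtualization callbacks exported to the rest of amdgpu. Note: this table is
 * expected to be installed on SR-IOV VFs by the SoC early-init code (roughly
 * adev->virt.ops = &xgpu_nv_virt_ops); that hookup lives outside this file.
 */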
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu = xgpu_nv_request_full_gpu_access,
	.rel_full_gpu = xgpu_nv_release_full_gpu_access,
	.reset_gpu = xgpu_nv_request_reset,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};