drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

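/* Acknowledge an incoming host message by setting the ack bit in the RCV
 * byte of the mailbox control register.
 */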
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

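/* Set or clear TRN_MSG_VALID in the TRN byte of the mailbox control
 * register to tell the host whether an outgoing message is valid.
 */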
static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * this peek_msg should *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * if called outside the IRQ routine, this peek_msg is not guaranteed to
 * return the correct value, since it reads RCV_DW0 without checking that
 * RCV_MSG_VALID has been set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

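/* Check whether the pending mailbox message matches @event; if so, ack it
 * back to the host and return 0, otherwise return -ENOENT.
 */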
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

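/* Non-blocking check of TRN_MSG_ACK (bit 1) in the TRN byte of the mailbox
 * control register.
 */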
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

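/* Busy-wait for the host's TRN_MSG_ACK, giving up after
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */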
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Didn't get TRN_MSG_ACK from pf within %d ms\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

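/* Poll for a specific incoming message, giving up after
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */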
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	return -ETIME;
}

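/* Write a four-dword request into the TRN mailbox, raise TRN_MSG_VALID,
 * then wait for the host to ack it.
 */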
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * clear TRN_MSG_VALID to make the host clear its RCV_MSG_ACK; once the
	 * host's RCV_MSG_ACK is cleared, hw automatically clears the VF's
	 * TRN_MSG_ACK. Otherwise the xgpu_nv_poll_ack() below would return
	 * immediately.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x, ACK should not be asserted! Waiting again!\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continuing anyway\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

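/* Send a request to the host and, for requests that expect a reply, poll for
 * the matching ready message. Also latches the init-data version and the
 * fw_reserve checksum key reported back by the host where applicable.
 */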
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;
	enum idh_event event = -1;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);
				return r;
			} else {
				/* host doesn't support REQ_GPU_INIT_DATA handshake */
				adev->virt.req_init_data_ver = 0;
			}
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

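/* Enable or disable the mailbox ack interrupt by toggling its enable bit
 * (bit 1) in MAILBOX_INT_CNTL.
 */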
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

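/* Worker scheduled from the mailbox rcv interrupt when the host signals a
 * function-level reset (FLR) of this VF: wait for FLR completion, then kick
 * GPU recovery if needed.
 */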
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;
	int locked;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE received,
	 * otherwise the mailbox msg will be corrupted/reset by
	 * the VF FLR.
	 *
	 * we can unlock lock_reset to allow "amdgpu_job_timedout"
	 * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
	 * which means the host side has finished this VF's FLR.
	 */
	locked = mutex_trylock(&adev->lock_reset);
	if (locked)
		adev->in_gpu_reset = true;

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	if (locked) {
		adev->in_gpu_reset = false;
		mutex_unlock(&adev->lock_reset);
	}

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

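/* Enable or disable the mailbox rcv-valid interrupt by toggling its enable
 * bit (bit 0) in MAILBOX_INT_CNTL.
 */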
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
		/* READY_TO_ACCESS_GPU is fetched by the kernel's polling, so the
		 * IRQ can ignore it here since the polling thread will handle it;
		 * other msgs such as flr complete are not handled here either.
		 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

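/* Register the mailbox interrupt sources with the BIF IH client: src id 135
 * for incoming messages (rcv) and 138 for acks.
 */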
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

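/* Take references on both mailbox interrupts and set up the FLR worker. */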
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

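/* Mailbox-backed virtualization ops used when amdgpu runs as an SR-IOV VF. */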
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data	= xgpu_nv_request_init_data,
	.reset_gpu = xgpu_nv_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_nv_mailbox_trans_msg,
};