/* drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c */
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"

uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
        uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

        addr -= AMDGPU_VA_RESERVED_SIZE;
        addr = amdgpu_gmc_sign_extend(addr);

        return addr;
}
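
/*
 * Illustrative note (a sketch, assuming a 48-bit VA space so that
 * max_pfn << AMDGPU_GPU_PAGE_SHIFT == 1ULL << 48): the CSA is carved out
 * of the very top of the VA range, and the result is sign-extended into
 * the canonical high half:
 *
 *      uint64_t addr = (1ULL << 48) - AMDGPU_VA_RESERVED_SIZE;
 *      addr = amdgpu_gmc_sign_extend(addr);  // -> 0xffffXXXXXXXXXXXX
 */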

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
        /*
         * By now all MMIO pages except the mailbox are blocked if blocking
         * is enabled in the hypervisor. Use SCRATCH_REG0 to test.
         */
        return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
        int r;
        void *ptr;

        r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
                                    &adev->virt.csa_vmid0_addr, &ptr);
        if (r)
                return r;

        memset(ptr, 0, AMDGPU_CSA_SIZE);
        return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->virt.csa_obj,
                              &adev->virt.csa_vmid0_addr,
                              NULL);
}

/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init.
 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM,
 * and each GFX command submission should use this virtual address within
 * the META_DATA init package to support SR-IOV gfx preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                          struct amdgpu_bo_va **bo_va)
{
        uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        struct amdgpu_bo_list_entry pd;
        struct ttm_validate_buffer csa_tv;
        int r;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&csa_tv.head);
        csa_tv.bo = &adev->virt.csa_obj->tbo;
        csa_tv.shared = true;

        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r) {
                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
                return r;
        }

        *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
        if (!*bo_va) {
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("failed to create bo_va for static CSA\n");
                return -ENOMEM;
        }

        r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
                                AMDGPU_CSA_SIZE);
        if (r) {
                DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
                             AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
                             AMDGPU_PTE_EXECUTABLE);
        if (r) {
                DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
                amdgpu_vm_bo_rmv(adev, *bo_va);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);
        return 0;
}
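
/*
 * Example (an illustrative sketch, not verbatim driver code): a typical
 * caller maps the static CSA right after creating the VM, as in the
 * amdgpu_driver_open_kms() path:
 *
 *      struct amdgpu_bo_va *csa_va;
 *
 *      if (amdgpu_sriov_vf(adev)) {
 *              r = amdgpu_map_static_csa(adev, &fpriv->vm, &csa_va);
 *              if (r)
 *                      goto error_vm;
 *      }
 */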

void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
        /* enable virtual display */
        adev->mode_info.num_crtc = 1;
        adev->enable_virtual_display = true;
        adev->cg_flags = 0;
        adev->pg_flags = 0;
}

uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't wait any longer in the GPU reset case, because that may
         * block the gpu_recover() routine forever: this virt_kiq_rreg can
         * be triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
         * never return if we keep waiting here, which causes gpu_recover()
         * to hang.
         *
         * Also don't wait any longer when called from IRQ context.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_read;

        /* retries below can sleep; the IRQ-context case already bailed out */
        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
        pr_err("failed to read reg:%x\n", reg);
        return ~0;
}
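
/*
 * Caller-side note (a sketch of how reads are routed here; see
 * amdgpu_mm_rreg() for the actual dispatch): while the VF does not own
 * the GPU, i.e. amdgpu_sriov_runtime(adev) is true, a plain RREG32()
 * cannot touch MMIO directly and is serviced through the KIQ instead:
 *
 *      if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 *              return amdgpu_virt_kiq_rreg(adev, reg);
 */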

void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't wait any longer in the GPU reset case, because that may
         * block the gpu_recover() routine forever: this virt_kiq_wreg can
         * be triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
         * never return if we keep waiting here, which causes gpu_recover()
         * to hang.
         *
         * Also don't wait any longer when called from IRQ context.
         */
        if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
                goto failed_kiq_write;

        /* retries below can sleep; the IRQ-context case already bailed out */
        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_write;

        return;

failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, full gpu access must be requested first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->req_full_gpu) {
                r = virt->ops->req_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}
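
/*
 * Example (an illustrative init-time pairing; the exact call sites in
 * amdgpu_device_init()/amdgpu_device_fini() are an assumption here):
 *
 *      r = amdgpu_virt_request_full_gpu(adev, true);
 *      if (r)
 *              return r;
 *      ... hardware init that needs exclusive access ...
 *      amdgpu_virt_release_full_gpu(adev, true);
 */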

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, full gpu access must be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->rel_full_gpu) {
                r = virt->ops->rel_full_gpu(adev, init);
                if (r)
                        return r;

                adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
        }
        return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;
        int r;

        if (virt->ops && virt->ops->reset_gpu) {
                r = virt->ops->reset_gpu(adev);
                if (r)
                        return r;

                adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
        }

        return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
        struct amdgpu_virt *virt = &adev->virt;

        if (!virt->ops || !virt->ops->wait_reset)
                return -EINVAL;

        return virt->ops->wait_reset(adev);
}
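
/*
 * Example (an illustrative SR-IOV recovery sequence; a sketch, not the
 * exact amdgpu_device_gpu_recover() code):
 *
 *      r = amdgpu_virt_reset_gpu(adev);          // ask the host to reset
 *      if (!r)
 *              r = amdgpu_virt_wait_reset(adev); // wait for completion
 */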

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
                return 0;

        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM,
                                    &adev->virt.mm_table.bo,
                                    &adev->virt.mm_table.gpu_addr,
                                    (void *)&adev->virt.mm_table.cpu_addr);
        if (r) {
                DRM_ERROR("failed to alloc mm table, error = %d.\n", r);
                return r;
        }

        memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
        DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
                 adev->virt.mm_table.gpu_addr,
                 adev->virt.mm_table.cpu_addr);
        return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
        if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
                return;

        amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
                              &adev->virt.mm_table.gpu_addr,
                              (void *)&adev->virt.mm_table.cpu_addr);
        adev->virt.mm_table.gpu_addr = 0;
}
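
/*
 * Usage note (an assumption based on the SR-IOV UVD/VCE init paths, e.g.
 * uvd_v7_0_sriov_start(), which hand mm_table.gpu_addr to the MMSCH when
 * programming the engine):
 *
 *      r = amdgpu_virt_alloc_mm_table(adev);   // at sw_init time
 *      ...
 *      amdgpu_virt_free_mm_table(adev);        // at sw_fini time
 */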
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
                                        unsigned long obj_size,
                                        unsigned int key,
                                        unsigned int chksum)
{
        unsigned int ret = key;
        unsigned long i = 0;
        unsigned char *pos;

        pos = (unsigned char *)obj;
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
        /* minus the chksum itself */
        pos = (unsigned char *)&chksum;
        for (i = 0; i < sizeof(chksum); ++i)
                ret -= *(pos + i);
        return ret;
}
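
/*
 * Worked example (illustrative): the PF computes its checksum over the
 * message with the checksum field zeroed, i.e. C = key + byte_sum(msg).
 * On the VF side the field now holds C, so
 *
 *      amdgpu_virt_fw_reserve_get_checksum(msg, size, key, C)
 *          == key + byte_sum(msg) + byte_sum(C) - byte_sum(C)
 *          == C                            (iff the message is intact)
 *
 * which is exactly the comparison done in
 * amdgpu_virt_init_data_exchange() below.
 */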

void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
        uint32_t pf2vf_size = 0;
        uint32_t checksum = 0;
        uint32_t checkval;
        char *str;

        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;

        if (adev->fw_vram_usage.va != NULL) {
                adev->virt.fw_reserve.p_pf2vf =
                        (struct amdgim_pf2vf_info_header *)(
                        adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
                AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

                /* the pf2vf message must fit within 4K */
                if (pf2vf_size > 0 && pf2vf_size < 4096) {
                        checkval = amdgpu_virt_fw_reserve_get_checksum(
                                adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
                                adev->virt.fw_reserve.checksum_key, checksum);
                        if (checkval == checksum) {
                                adev->virt.fw_reserve.p_vf2pf =
                                        ((void *)adev->virt.fw_reserve.p_pf2vf +
                                        pf2vf_size);
                                memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
                                        AMDGPU_FW_VRAM_VF2PF_VER);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
                                        sizeof(amdgim_vf2pf_info));
                                AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
                                        &str);
#ifdef MODULE
                                if (THIS_MODULE->version != NULL)
                                        strcpy(str, THIS_MODULE->version);
                                else
#endif
                                        strcpy(str, "N/A");
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
                                        0);
                                AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
                                        amdgpu_virt_fw_reserve_get_checksum(
                                        adev->virt.fw_reserve.p_vf2pf,
                                        pf2vf_size,
                                        adev->virt.fw_reserve.checksum_key, 0));
                        }
                }
        }
}