/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
#include <linux/dma-buf.h>

const struct kgd2kfd_calls *kgd2kfd;
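
/* 0xFF00 sets bits 8-15: VMIDs 0-7 stay with the amdgpu graphics side,
 * VMIDs 8-15 are handed to the KFD compute scheduler.
 */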
static const unsigned int compute_vmid_bitmap = 0xFF00;

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate worst case amount of memory to reserve for page tables
 */
uint64_t amdgpu_amdkfd_total_mem_size;

int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd)
		kgd2kfd->exit();
}

void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
				       adev->pdev, kfd2kgd);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *				  setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}
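
/* Worked example with made-up sizes: given a 2 MiB doorbell BAR and
 * amdgpu claiming 1024 32-bit doorbells, amdkfd would see
 * aperture_base = BAR base, aperture_size = 2 MiB and
 * start_offset = 1024 * sizeof(u32) = 4 KiB, i.e. the first page of
 * doorbells stays with amdgpu and everything above it is amdkfd's.
 */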

void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i, n;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.sched.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not compile time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);
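
		/* At this point queue_bitmap holds exactly the MEC0 compute
		 * queues KFD may use: amdgpu's own queues, the KIQ and all
		 * queues on the other MECs have been masked out above.
		 */
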
		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		if (adev->asic_type < CHIP_VEGA10) {
			kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
			return;
		}

		n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;

		for (i = 0; i < n; i += 2) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][i] =
				adev->doorbell_index.sdma_engine0 + (i >> 1);
			gpu_resources.sdma_doorbell[0][i+1] =
				adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
			gpu_resources.sdma_doorbell[1][i] =
				adev->doorbell_index.sdma_engine1 + (i >> 1);
			gpu_resources.sdma_doorbell[1][i+1] =
				adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
		}
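
		/* Each pair above exposes one SDMA queue twice: at its
		 * doorbell index in the first per-process doorbell page and
		 * at +0x200, the same slot in the second page (a 4 KiB page
		 * holds 0x200 64-bit doorbells), so both pages work.
		 */
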
		/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
		 * SDMA, IH and VCN. So don't use them for the CP.
		 */
		gpu_resources.reserved_doorbell_mask = 0x1e0;
		gpu_resources.reserved_doorbell_val = 0x0e0;
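
		/* The mask/value pair marks an index reserved when
		 * (index & reserved_doorbell_mask) == reserved_doorbell_val,
		 * which covers 0x0e0-0x0ff, 0x2e0-0x2ff and every further
		 * repeat at 0x200 strides.
		 */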

		kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd->device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			     const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd.dev)
		kgd2kfd->suspend(adev->kfd.dev);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd->resume(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd->pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd->post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	if (mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}
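
/* Teardown mirrors the allocation above: unmap the kernel mapping,
 * unpin and drop the reference, all under the BO reservation.
 */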
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);
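
	/* Clocks below come from powerplay in units of 10 kHz; dividing
	 * by 100 converts them to the MHz value reported to KFD.
	 */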
	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10 kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev->ddev->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = obj->dev->dev_private;
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				       AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->switch_power_profile)
		amdgpu_dpm_switch_power_profile(adev,
						PP_SMC_POWER_PROFILE_COMPUTE,
						!idle);
}

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}
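
/* Stubs for builds without CONFIG_HSA_AMD: the rest of amdgpu calls
 * into these unconditionally, so provide no-op fallbacks when the KFD
 * side is not compiled in.
 */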
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif