/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */

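/*
 * Illustrative userspace flow (example only, not part of the original file):
 * a BO is shared by exporting its GEM handle to a dma-buf file descriptor on
 * one device and importing that fd on another, via the generic PRIME ioctls:
 *
 *	struct drm_prime_handle args = { .handle = handle, .flags = DRM_CLOEXEC };
 *
 *	ioctl(exporter_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *
 * args.fd then holds a dma-buf fd, which the importing side turns back into
 * a GEM handle with DRM_IOCTL_PRIME_FD_TO_HANDLE. The callbacks below
 * implement the driver side of this sharing.
 */
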
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>

/**
 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 * implementation
 * @obj: GEM buffer object (BO)
 *
 * Returns:
 * A scatter/gather table for the pinned pages of the BO's memory.
 */
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}

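/*
 * Context note (added as an aid, not in the original file): this callback is
 * not invoked directly; drm_gem_map_dma_buf() in &amdgpu_dmabuf_ops below
 * calls it when an importer maps its attachment with
 * dma_buf_map_attachment().
 */
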
/**
 * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 * @obj: GEM BO
 *
 * Sets up an in-kernel virtual mapping of the BO's memory.
 *
 * Returns:
 * The virtual address of the mapping or an error pointer.
 */
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}

/**
 * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 * @obj: GEM BO
 * @vaddr: Virtual address (unused)
 *
 * Tears down the in-kernel virtual mapping of the BO's memory.
 */
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}

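/*
 * Illustrative in-kernel usage (example only, not part of the original
 * file): an importer reaches the two callbacks above through
 * drm_gem_dmabuf_vmap()/drm_gem_dmabuf_vunmap():
 *
 *	void *vaddr = dma_buf_vmap(dma_buf);
 *	if (vaddr) {
 *		... CPU access through vaddr ...
 *		dma_buf_vunmap(dma_buf, vaddr);
 *	}
 */
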
/**
 * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 * @obj: GEM BO
 * @vma: Virtual memory area
 *
 * Sets up a userspace mapping of the BO's memory in the given
 * virtual memory area.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}

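/*
 * Illustrative userspace usage (example only, not part of the original
 * file): mapping the exported dma-buf fd lands here via
 * drm_gem_dmabuf_mmap():
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 */
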
/**
 * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 * implementation
 * @dev: DRM device
 * @attach: DMA-buf attachment
 * @sg: Scatter/gather table
 *
 * Imports shared DMA buffer memory exported by another device.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg)
{
	struct reservation_object *resv = attach->dmabuf->resv;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int ret;

	memset(&bp, 0, sizeof(bp));
	bp.size = attach->dmabuf->size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_CPU;
	bp.flags = 0;
	bp.type = ttm_bo_type_sg;
	bp.resv = resv;
	ww_mutex_lock(&resv->lock, NULL);
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret)
		goto error;

	bo->tbo.sg = sg;
	bo->tbo.ttm->sg = sg;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
		bo->prime_shared_count = 1;

	ww_mutex_unlock(&resv->lock);
	return &bo->gem_base;

error:
	ww_mutex_unlock(&resv->lock);
	return ERR_PTR(ret);
}

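/*
 * Context note (added as an aid, not in the original file): this runs via
 * the drm_gem_prime_import() helper when the dma-buf comes from a foreign
 * exporter. The BO is created as ttm_bo_type_sg sharing the dma-buf's
 * reservation object, and GTT is the only domain it is allowed to move to.
 */
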
/**
 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
 * @dma_buf: Shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * For now, simply pins it to the GTT domain, where it should be accessible by
 * all DMA devices.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	r = drm_gem_map_attach(dma_buf, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;

	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * Wait for all shared fences to complete before we switch to
		 * future use of exclusive fence on this prime shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		goto error_unreserve;

	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}

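/*
 * Illustrative importer-side trigger (example only, not part of the
 * original file): another device's driver ends up in the callback above
 * when it attaches to the shared buffer:
 *
 *	attach = dma_buf_attach(dma_buf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 */
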
/**
 * amdgpu_gem_map_detach - &dma_buf_ops.detach implementation
 * @dma_buf: Shared DMA buffer
 * @attach: DMA-buf attachment
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. For now, simply unpins the buffer from GTT.
 */
static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int ret = 0;

	ret = amdgpu_bo_reserve(bo, true);
	if (unlikely(ret != 0))
		goto error;

	amdgpu_bo_unpin(bo);
	if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
		bo->prime_shared_count--;
	amdgpu_bo_unreserve(bo);

error:
	drm_gem_map_detach(dma_buf, attach);
}

/**
 * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 * @obj: GEM BO
 *
 * Returns:
 * The BO's reservation object.
 */
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	return bo->tbo.resv;
}

/**
 * amdgpu_gem_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for
 * optimal CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

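/*
 * Illustrative trigger (example only, not part of the original file):
 * userspace brackets CPU access to a mapped dma-buf with the sync ioctl,
 * which calls into the callback above:
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
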
const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_gem_map_attach,
	.detach = amdgpu_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_gem_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

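/*
 * Note (added as an aid, not in the original file): this table is installed
 * on exported dma-bufs by amdgpu_gem_prime_export() below, and
 * amdgpu_gem_prime_import() compares against it to detect buffers that were
 * exported by amdgpu itself.
 */
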
/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @dev: DRM device
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper, which in turn
 * uses &amdgpu_gem_prime_res_obj.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf)) {
		buf->file->f_mapping = dev->anon_inode->i_mapping;
		buf->ops = &amdgpu_dmabuf_ops;
	}

	return buf;
}

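/*
 * Context note (added as an aid, not in the original file): userptr BOs are
 * backed by another process address space and per-VM BOs
 * (AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) are only valid inside their own VM,
 * so both are rejected with -EPERM above rather than exported.
 */
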
/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * The main work is done by the &drm_gem_prime_import helper, which in turn
 * uses &amdgpu_gem_prime_import_sg_table.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}