1 // SPDX-License-Identifier: MIT
3 * Copyright © 2022 Intel Corporation
6 #include "xe_dma_buf.h"
8 #include <kunit/test.h>
9 #include <linux/dma-buf.h>
10 #include <linux/pci-p2pdma.h>
12 #include <drm/drm_device.h>
13 #include <drm/drm_prime.h>
14 #include <drm/ttm/ttm_tt.h>
16 #include "tests/xe_test.h"
18 #include "xe_device.h"
19 #include "xe_ttm_vram_mgr.h"
22 MODULE_IMPORT_NS(DMA_BUF);
/*
 * xe_dma_buf_attach() - dma-buf @attach callback for Xe-exported buffers.
 *
 * Disables peer2peer when no usable PCI P2P DMA path exists between the
 * exporting device and the importer, and takes a device memory-access
 * reference (paired with the put in xe_dma_buf_detach()).
 *
 * NOTE(review): interior lines of this function (braces, the error
 * return for the non-migratable case, the final return) are not visible
 * in this view — do not assume the visible lines are the complete body.
 */
24 static int xe_dma_buf_attach(struct dma_buf *dmabuf,
25 struct dma_buf_attachment *attach)
27 struct drm_gem_object *obj = attach->dmabuf->priv;
/* No P2P DMA path to the importer: force the non-p2p (system memory) path. */
29 if (attach->peer2peer &&
30 pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
31 attach->peer2peer = false;
/*
 * Without p2p the importer must map the bo from system (TT) memory, so a
 * bo that cannot migrate to XE_PL_TT cannot be attached — presumably an
 * error is returned here; the return statement is outside this view.
 */
33 if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
/* Keep the device awake/accessible for the lifetime of the attachment. */
36 xe_device_mem_access_get(to_xe_device(obj->dev));
/*
 * xe_dma_buf_detach() - dma-buf @detach callback.
 *
 * Drops the device memory-access reference taken in xe_dma_buf_attach().
 */
40 static void xe_dma_buf_detach(struct dma_buf *dmabuf,
41 struct dma_buf_attachment *attach)
43 struct drm_gem_object *obj = attach->dmabuf->priv;
45 xe_device_mem_access_put(to_xe_device(obj->dev));
/*
 * xe_dma_buf_pin() - dma-buf @pin callback: pin the backing bo in TT
 * (system) memory for non-dynamic importers.
 *
 * Migrates the bo to XE_PL_TT first, then pins it externally. A bo that
 * is already pinned in some other placement cannot be migrated and the
 * pin is rejected.
 *
 * NOTE(review): several interior lines (braces, returns, the drm_dbg()
 * call opening for the migrate failure, the variable declaration of
 * "ret") are not visible in this sampled view.
 */
48 static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
50 struct drm_gem_object *obj = attach->dmabuf->priv;
51 struct xe_bo *bo = gem_to_xe_bo(obj);
52 struct xe_device *xe = xe_bo_device(bo);
56 * For now only support pinning in TT memory, for two reasons:
57 * 1) Avoid pinning in a placement not accessible to some importers.
58 * 2) Pinning in VRAM requires PIN accounting which is a to-do.
/* Already pinned somewhere other than TT: cannot migrate, refuse. */
60 if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
61 drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
65 ret = xe_bo_migrate(bo, XE_PL_TT);
/* Only log real failures, not interrupted/restartable waits. */
67 if (ret != -EINTR && ret != -ERESTARTSYS)
69 "Failed migrating dma-buf to TT memory: %pe\n",
/* Pin with external (dma-buf importer) accounting; see xe_dma_buf_unpin(). */
74 ret = xe_bo_pin_external(bo);
/*
 * xe_dma_buf_unpin() - dma-buf @unpin callback.
 *
 * Releases the external pin taken by xe_dma_buf_pin().
 */
80 static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
82 struct drm_gem_object *obj = attach->dmabuf->priv;
83 struct xe_bo *bo = gem_to_xe_bo(obj);
85 xe_bo_unpin_external(bo);
/*
 * xe_dma_buf_map() - dma-buf @map_dma_buf callback.
 *
 * Builds and DMA-maps an sg_table for the importer. For a bo resident in
 * TT memory the pages are converted with drm_prime_pages_to_sg() and
 * mapped with dma_map_sgtable(); for VRAM-resident bos (p2p path) the
 * sg_table is produced by xe_ttm_vram_mgr_alloc_sgt(). Unpinned bos are
 * first migrated to TT (no p2p) or merely validated (p2p).
 *
 * Return: the mapped sg_table, or an ERR_PTR() on failure
 * (-EOPNOTSUPP for an unmigratable bo without p2p, -EINVAL for an
 * unexpected placement, -EBUSY on a later error path).
 *
 * NOTE(review): case labels, error-checks on "r", the sgt allocation and
 * several cleanup lines fall between the sampled lines and are not
 * visible here.
 */
88 static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
89 enum dma_data_direction dir)
91 struct dma_buf *dma_buf = attach->dmabuf;
92 struct drm_gem_object *obj = dma_buf->priv;
93 struct xe_bo *bo = gem_to_xe_bo(obj);
/* Mirrors the attach-time check: no p2p requires migratability to TT. */
97 if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
98 return ERR_PTR(-EOPNOTSUPP);
100 if (!xe_bo_is_pinned(bo)) {
/* Without p2p, force the bo into TT so its pages can be mapped. */
101 if (!attach->peer2peer)
102 r = xe_bo_migrate(bo, XE_PL_TT);
/* With p2p, any current placement is fine — just (re)validate it. */
104 r = xe_bo_validate(bo, NULL, false);
/* Dispatch on where the bo currently resides. */
109 switch (bo->ttm.resource->mem_type) {
111 sgt = drm_prime_pages_to_sg(obj->dev,
113 bo->ttm.ttm->num_pages);
/* Skip CPU sync: dma-buf users handle coherency explicitly. */
117 if (dma_map_sgtable(attach->dev, sgt, dir,
118 DMA_ATTR_SKIP_CPU_SYNC))
/* VRAM path: build a p2p sg_table against the importer's device. */
124 r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
126 bo->ttm.base.size, attach->dev,
132 return ERR_PTR(-EINVAL);
140 return ERR_PTR(-EBUSY);
/*
 * xe_dma_buf_unmap() - dma-buf @unmap_dma_buf callback.
 *
 * Undoes xe_dma_buf_map(): system-memory mappings are dma_unmap'ed and
 * the sg_table presumably freed on the lines not visible here; VRAM
 * (p2p) sg_tables are released via xe_ttm_vram_mgr_free_sgt().
 */
143 static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
144 struct sg_table *sgt,
145 enum dma_data_direction dir)
147 struct dma_buf *dma_buf = attach->dmabuf;
148 struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
/* Non-VRAM: this sgt was mapped with dma_map_sgtable() in the map path. */
150 if (!xe_bo_is_vram(bo)) {
151 dma_unmap_sgtable(attach->dev, sgt, dir, 0);
/* VRAM: sgt came from xe_ttm_vram_mgr_alloc_sgt(). */
155 xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
/*
 * xe_dma_buf_begin_cpu_access() - dma-buf @begin_cpu_access callback.
 *
 * Migrates the bo to TT memory (best effort — the migrate result is
 * deliberately ignored) so the CPU can access it. The "reads" flag is
 * computed from @direction but its use falls outside the sampled lines.
 *
 * NOTE(review): the lock is taken uninterruptibly; the existing comment
 * below already questions whether an interruptible lock would do.
 */
159 static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
160 enum dma_data_direction direction)
162 struct drm_gem_object *obj = dma_buf->priv;
163 struct xe_bo *bo = gem_to_xe_bo(obj);
/* CPU reads need the data in a CPU-visible placement. */
164 bool reads = (direction == DMA_BIDIRECTIONAL ||
165 direction == DMA_FROM_DEVICE);
170 /* Can we do interruptible lock here? */
171 xe_bo_lock(bo, false);
/* Best effort: failure to migrate is intentionally not propagated. */
172 (void)xe_bo_migrate(bo, XE_PL_TT);
/*
 * dma_buf_ops for buffers exported by Xe. Attach/detach, pin/unpin,
 * map/unmap and begin_cpu_access are Xe-specific (above); release, mmap
 * and vmap/vunmap use the generic DRM GEM prime helpers.
 */
178 const struct dma_buf_ops xe_dmabuf_ops = {
179 .attach = xe_dma_buf_attach,
180 .detach = xe_dma_buf_detach,
181 .pin = xe_dma_buf_pin,
182 .unpin = xe_dma_buf_unpin,
183 .map_dma_buf = xe_dma_buf_map,
184 .unmap_dma_buf = xe_dma_buf_unmap,
185 .release = drm_gem_dmabuf_release,
186 .begin_cpu_access = xe_dma_buf_begin_cpu_access,
187 .mmap = drm_gem_dmabuf_mmap,
188 .vmap = drm_gem_dmabuf_vmap,
189 .vunmap = drm_gem_dmabuf_vunmap,
/*
 * xe_gem_prime_export() - GEM prime export hook for Xe.
 *
 * Exports @obj via the generic drm_gem_prime_export() and then installs
 * the Xe-specific xe_dmabuf_ops. The -EPERM path's guard condition falls
 * between the sampled lines (presumably a check on @bo); the error check
 * on @buf before overwriting ->ops is likewise not visible here.
 */
192 struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
194 struct xe_bo *bo = gem_to_xe_bo(obj);
198 return ERR_PTR(-EPERM);
200 buf = drm_gem_prime_export(obj, flags);
/* Override the generic ops with Xe's dma_buf_ops. */
202 buf->ops = &xe_dmabuf_ops;
/*
 * xe_dma_buf_init_obj() - create the importing-side GEM object for a
 * foreign dma-buf.
 *
 * Creates an sg-type bo in @storage that shares @dma_buf's reservation
 * object, under the dma-buf's resv lock. Returns the embedded GEM object
 * on success; the error path(s) between the sampled lines are not
 * visible here, but both success and failure unlock @resv.
 */
207 static struct drm_gem_object *
208 xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
209 struct dma_buf *dma_buf)
211 struct dma_resv *resv = dma_buf->resv;
212 struct xe_device *xe = to_xe_device(dev);
/* Share the exporter's reservation object: lock it for bo creation. */
216 dma_resv_lock(resv, NULL);
217 bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
218 0, /* Will require 1way or 2way for vm_bind */
219 ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
224 dma_resv_unlock(resv);
226 return &bo->ttm.base;
/* Error path: still must drop the shared resv lock. */
229 dma_resv_unlock(resv);
/*
 * xe_dma_buf_move_notify() - importer @move_notify callback.
 *
 * The exporter is moving the buffer: evict our local bo so stale
 * mappings are dropped. Eviction failure is unexpected, hence WARN.
 */
233 static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
235 struct drm_gem_object *obj = attach->importer_priv;
236 struct xe_bo *bo = gem_to_xe_bo(obj);
238 XE_WARN_ON(xe_bo_evict(bo, false));
/*
 * Default dynamic-attachment ops used when importing a foreign dma-buf:
 * allow PCI peer-to-peer and react to exporter-side moves.
 */
241 static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
242 .allow_peer2peer = true,
243 .move_notify = xe_dma_buf_move_notify
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/*
 * KUnit-only test hooks: lets the live dma-buf test substitute its own
 * attach_ops and pretend exporter and importer are different devices.
 */
248 struct dma_buf_test_params {
249 struct xe_test_priv base;        /* embedded test-priv header */
250 const struct dma_buf_attach_ops *attach_ops; /* override for import attach */
251 bool force_different_devices;    /* treat self-import as cross-device */
/* Recover the params struct from the embedded xe_test_priv member. */
255 #define to_dma_buf_test_params(_priv) \
256 container_of(_priv, struct dma_buf_test_params, base)
/*
 * xe_gem_prime_import() - GEM prime import hook for Xe.
 *
 * Fast path: a dma-buf exported by this very device is "imported" by
 * just taking a GEM reference on the underlying object. Otherwise a
 * local bo is pre-created, a dynamic attachment is set up (optionally
 * with KUnit-injected attach_ops), and the bo is initialised from the
 * dma-buf via xe_dma_buf_init_obj().
 *
 * NOTE(review): the bo pre-creation call, several error-path labels and
 * the function's returns fall between the sampled lines and are not
 * visible in this view.
 */
259 struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
260 struct dma_buf *dma_buf)
/* KUnit-only: fetch the live-test parameters, if a test is running. */
262 XE_TEST_DECLARE(struct dma_buf_test_params *test =
263 to_dma_buf_test_params
264 (xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
265 const struct dma_buf_attach_ops *attach_ops;
266 struct dma_buf_attachment *attach;
267 struct drm_gem_object *obj;
/* Is this one of our own exports? */
270 if (dma_buf->ops == &xe_dmabuf_ops) {
272 if (obj->dev == dev &&
273 !XE_TEST_ONLY(test && test->force_different_devices)) {
275 * Importing dmabuf exported from our own gem increases
276 * refcount on gem itself instead of f_count of dmabuf.
278 drm_gem_object_get(obj);
284 * Don't publish the bo until we have a valid attachment, and a
285 * valid attachment needs the bo address. So pre-create a bo before
286 * creating the attachment and publish.
292 attach_ops = &xe_dma_buf_attach_ops;
/* Tests may substitute their own attach_ops. */
293 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
295 attach_ops = test->attach_ops;
298 attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
299 if (IS_ERR(attach)) {
300 obj = ERR_CAST(attach);
304 /* Errors here will take care of freeing the bo. */
305 obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
/* Success: hold a reference on the dma-buf for the attachment's lifetime. */
310 get_dma_buf(dma_buf);
311 obj->import_attach = attach;
320 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
321 #include "tests/xe_dma_buf.c"