// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"

struct i915_sleeve {
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
};

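/*
 * The proxy vma_ops below substitute the sleeve's page list for the
 * object's own backing store, so a bound sleeve vma maps the caller
 * supplied sg_table rather than obj->mm.pages.
 */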
static int vma_set_pages(struct i915_vma *vma)
{
	struct i915_sleeve *sleeve = vma->private;

	vma->pages = sleeve->pages;
	vma->page_sizes = sleeve->page_sizes;

	return 0;
}

static void vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	vma->pages = NULL;
}

static void vma_bind(struct i915_address_space *vm,
		     struct i915_vm_pt_stash *stash,
		     struct i915_vma *vma,
		     enum i915_cache_level cache_level,
		     u32 flags)
{
	vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags);
}

static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->vma_ops.unbind_vma(vm, vma);
}

static const struct i915_vma_ops proxy_vma_ops = {
	.set_pages = vma_set_pages,
	.clear_pages = vma_clear_pages,
	.bind_vma = vma_bind,
	.unbind_vma = vma_unbind,
};

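/*
 * Wrap @obj in a sleeve: a private vma in @vm using proxy_vma_ops, so
 * that binding it maps @pages instead of the object's own backing store.
 */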
static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
					 struct drm_i915_gem_object *obj,
					 struct sg_table *pages,
					 struct i915_page_sizes *page_sizes)
{
	struct i915_sleeve *sleeve;
	struct i915_vma *vma;
	int err;

	sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
	if (!sleeve)
		return ERR_PTR(-ENOMEM);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_free;
	}

	vma->private = sleeve;
	vma->ops = &proxy_vma_ops;

	sleeve->vma = vma;
	sleeve->pages = pages;
	sleeve->page_sizes = *page_sizes;

	return sleeve;

err_free:
	kfree(sleeve);
	return ERR_PTR(err);
}

static void destroy_sleeve(struct i915_sleeve *sleeve)
{
	kfree(sleeve);
}

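/*
 * A clear_pages_work tracks one asynchronous fill: the sw fence waits on
 * the object's reservation, the worker then emits the blitter request,
 * and the embedded dma_fence reports completion (or error) to callers.
 */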
struct clear_pages_work {
	struct dma_fence dma;
	struct dma_fence_cb cb;
	struct i915_sw_fence wait;
	struct work_struct work;
	struct irq_work irq_work;
	struct i915_sleeve *sleeve;
	struct intel_context *ce;
	u32 value;
};

static const char *clear_pages_work_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
{
	return "clear";
}

static void clear_pages_work_release(struct dma_fence *fence)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), dma);

	destroy_sleeve(w->sleeve);

	i915_sw_fence_fini(&w->wait);

	BUILD_BUG_ON(offsetof(typeof(*w), dma));
	dma_fence_free(&w->dma);
}

static const struct dma_fence_ops clear_pages_work_ops = {
	.get_driver_name = clear_pages_work_driver_name,
	.get_timeline_name = clear_pages_work_timeline_name,
	.release = clear_pages_work_release,
};

static void clear_pages_signal_irq_worker(struct irq_work *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);

	dma_fence_signal(&w->dma);
	dma_fence_put(&w->dma);
}

static void clear_pages_dma_fence_cb(struct dma_fence *fence,
				     struct dma_fence_cb *cb)
{
	struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

	if (fence->error)
		dma_fence_set_error(&w->dma, fence->error);

	/*
	 * Push the signalling of the fence into yet another worker to avoid
	 * the nightmare locking around the fence spinlock.
	 */
	irq_work_queue(&w->irq_work);
}

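/*
 * Run once the reservation waits have completed: pin the sleeve's vma,
 * emit a fill blt of w->value and chain w->dma to the request so that it
 * signals when the blit retires. Any error is reported through w->dma.
 */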
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_request *rq;
	struct i915_vma *batch;
	int err = w->dma.error;

	if (unlikely(err))
		goto out_signal;

	if (obj->cache_dirty) {
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(w->sleeve->pages);
		obj->cache_dirty = false;
	}
	obj->read_domains = I915_GEM_GPU_DOMAINS;
	obj->write_domain = 0;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_signal;

	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unpin;
	}

	rq = intel_context_create_request(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_batch;
	}

	/* There's no way the fence has signalled */
	if (dma_fence_add_callback(&rq->fence, &w->cb,
				   clear_pages_dma_fence_cb))
		GEM_BUG_ON(1);

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	if (w->ce->engine->emit_init_breadcrumb) {
		err = w->ce->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_request;
	}

	/*
	 * w->dma is already exported via (vma|obj)->resv, we need only
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = __i915_vma_move_to_active(vma, rq);
	if (err)
		goto out_request;

	err = w->ce->engine->emit_bb_start(rq,
					   batch->node.start, batch->node.size,
					   0);
out_request:
	if (unlikely(err)) {
		i915_request_set_error_once(rq, err);
		err = 0;
	}

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(w->ce, batch);
out_unpin:
	i915_vma_unpin(vma);
out_signal:
	if (unlikely(err)) {
		dma_fence_set_error(&w->dma, err);
		dma_fence_signal(&w->dma);
		dma_fence_put(&w->dma);
	}
}

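/*
 * Notification from the sw fence: on FENCE_COMPLETE kick the worker, on
 * FENCE_FREE drop our reference on the exported dma fence.
 */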
static int __i915_sw_fence_call
clear_pages_work_notify(struct i915_sw_fence *fence,
			enum i915_sw_fence_notify state)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&w->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&w->dma);
		break;
	}

	return NOTIFY_DONE;
}

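/* A single lock shared by the dma fences of all clear_pages_work items */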
static DEFINE_SPINLOCK(fence_lock);

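/*
 * Schedule an asynchronous blitter fill of @pages (belonging to @obj)
 * with @value on @ce. The fill is ordered after the existing fences on
 * the object's reservation and installed as its new exclusive fence.
 */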
/* XXX: better name please */
int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
				     struct intel_context *ce,
				     struct sg_table *pages,
				     struct i915_page_sizes *page_sizes,
				     u32 value)
{
	struct clear_pages_work *work;
	struct i915_sleeve *sleeve;
	int err;

	sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
	if (IS_ERR(sleeve))
		return PTR_ERR(sleeve);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		destroy_sleeve(sleeve);
		return -ENOMEM;
	}

	work->value = value;
	work->sleeve = sleeve;
	work->ce = ce;

	INIT_WORK(&work->work, clear_pages_worker);

	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);

	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
	i915_sw_fence_init(&work->wait, clear_pages_work_notify);

	i915_gem_object_lock(obj);
	err = i915_sw_fence_await_reservation(&work->wait,
					      obj->base.resv, NULL, true, 0,
					      I915_FENCE_GFP);
	if (err < 0) {
		dma_fence_set_error(&work->dma, err);
	} else {
		dma_resv_add_excl_fence(obj->base.resv, &work->dma);
		err = 0;
	}
	i915_gem_object_unlock(obj);

	dma_fence_get(&work->dma);
	i915_sw_fence_commit(&work->wait);

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_client_blt.c"
#endif