/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"
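/*
 * igt_spinner is a selftest helper that keeps an engine busy indefinitely
 * (gen8+ only, see the GEM_BUG_ON below). It submits a batch that writes
 * its request's seqno into a slot of a private "HWS" page and then
 * branches back to its own start, looping until igt_spinner_end()
 * rewrites the first instruction to MI_BATCH_BUFFER_END. Tests use it to
 * hold an engine busy while exercising behaviour such as preemption or
 * resets.
 */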
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); /* 0xff == not started */

	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
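/*
 * Each fence context gets its own u32 slot in the HWS page (modulo
 * PAGE_SIZE): the spinning batch reports progress by storing
 * rq->fence.seqno there, and hws_seqno() reads it back.
 */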
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	err = i915_vma_move_to_active(vma, rq, flags);
	if (err)
		return err;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	return 0;
}
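/*
 * Build and submit the spinning batch:
 *
 *   MI_STORE_DWORD_IMM    - write rq->fence.seqno into this context's
 *                           HWS slot
 *   <arbitration_command> - caller's choice, e.g. MI_ARB_CHECK to leave
 *                           a preemption point, or MI_NOOP
 *   MI_BATCH_BUFFER_START - branch back to the start of the batch
 *
 * The loop is only broken once igt_spinner_end() overwrites the first
 * dword with MI_BATCH_BUFFER_END; the trailing terminator below is a
 * safety net and never reached by the spinning CS.
 */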
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_address_space *vm = &ctx->ppgtt->vm;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; /* gen8+ */
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	i915_gem_chipset_flush(spin->i915);

	if (engine->emit_init_breadcrumb &&
	    rq->timeline->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}
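/* Sample the seqno the spinning batch last wrote for this context. */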
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
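/*
 * Wait for the spinner to start executing on the GPU: busy-wait on the
 * HWS slot for up to 10us, then back off to a sleeping wait of up to
 * 1000ms. Returns true if the batch reported its seqno before both
 * waits timed out, i.e. the spinner is actually running.
 */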
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}
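/*
 * Illustrative sketch (not part of the original file): a minimal example
 * of how a selftest might drive the spinner, assuming the caller holds
 * struct_mutex and a runtime-pm wakeref. The function name and error
 * handling here are hypothetical; only the igt_spinner_* calls come from
 * this file.
 */
static int __maybe_unused example_spin_and_check(struct drm_i915_private *i915,
						 struct i915_gem_context *ctx,
						 struct intel_engine_cs *engine)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, i915);
	if (err)
		return err;

	/* MI_ARB_CHECK leaves a preemption point inside the spin loop */
	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	if (!igt_wait_for_spinner(&spin, rq)) {
		err = -ETIME; /* the batch never started executing */
		goto out;
	}

	/* ... exercise preemption, resets, etc. against the busy engine ... */

out:
	igt_spinner_end(&spin); /* break the loop, let the request retire */
	igt_spinner_fini(&spin);
	return err;
}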