X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fgem%2Fi915_gem_execbuffer.c;h=cb86574bdab4ae78fc4cc9caf9673d4e7291a962;hb=6ff6d61dd2a943bd0c80bb77eb5630e8aa0cac15;hp=5964e67c7d36314838c7839391eeab3954d2c2dd;hpb=c70a4be130de333ea079c59da41cc959712bb01c;p=linux-2.6-microblaze.git

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5964e67c7d36..cb86574bdab4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -274,7 +274,7 @@ struct i915_execbuffer {
 		struct drm_mm_node node; /** temporary GTT binding */
 		unsigned long vaddr; /** Current kmap address */
 		unsigned long page; /** Currently mapped page index */
-		unsigned int gen; /** Cached value of INTEL_GEN */
+		unsigned int graphics_ver; /** Cached value of GRAPHICS_VER */
 		bool use_64bit_reloc : 1;
 		bool has_llc : 1;
 		bool has_fence : 1;
@@ -290,7 +290,6 @@ struct i915_execbuffer {
 	struct intel_context *reloc_context;
 
 	u64 invalid_flags; /** Set of execobj.flags that are invalid */
-	u32 context_flags; /** Set of execobj.flags to insert from the ctx */
 
 	u64 batch_len; /** Length of batch within object */
 	u32 batch_start_offset; /** Location within object of batch */
@@ -500,7 +499,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	 * also covers all platforms with local memory.
 	 */
 	if (entry->relocation_count &&
-	    INTEL_GEN(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
+	    GRAPHICS_VER(eb->i915) >= 12 && !IS_TIGERLAKE(eb->i915))
 		return -EINVAL;
 
 	if (unlikely(entry->flags & eb->invalid_flags))
@@ -541,9 +540,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
 			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
 	}
 
-	if (!(entry->flags & EXEC_OBJECT_PINNED))
-		entry->flags |= eb->context_flags;
-
 	return 0;
 }
 
@@ -750,10 +746,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	if (rcu_access_pointer(ctx->vm))
 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
-	eb->context_flags = 0;
-	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
-		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
 	return 0;
 }
 
@@ -922,21 +914,38 @@ err:
 	return err;
 }
 
-static int eb_validate_vmas(struct i915_execbuffer *eb)
+static int eb_lock_vmas(struct i915_execbuffer *eb)
 {
 	unsigned int i;
 	int err;
 
-	INIT_LIST_HEAD(&eb->unbound);
-
 	for (i = 0; i < eb->buffer_count; i++) {
-		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 		struct eb_vma *ev = &eb->vma[i];
 		struct i915_vma *vma = ev->vma;
 
 		err = i915_gem_object_lock(vma->obj, &eb->ww);
 		if (err)
 			return err;
+	}
+
+	return 0;
+}
+
+static int eb_validate_vmas(struct i915_execbuffer *eb)
+{
+	unsigned int i;
+	int err;
+
+	INIT_LIST_HEAD(&eb->unbound);
+
+	err = eb_lock_vmas(eb);
+	if (err)
+		return err;
+
+	for (i = 0; i < eb->buffer_count; i++) {
+		struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+		struct eb_vma *ev = &eb->vma[i];
+		struct i915_vma *vma = ev->vma;
 
 		err = eb_pin_vma(eb, entry, ev);
 		if (err == -EDEADLK)
@@ -994,7 +1003,7 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
 	}
 }
 
-static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release_userptr)
+static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
 {
 	const unsigned int count = eb->buffer_count;
 	unsigned int i;
@@ -1008,11 +1017,6 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release
 
 		eb_unreserve_vma(ev);
 
-		if (release_userptr && ev->flags & __EXEC_OBJECT_USERPTR_INIT) {
-			ev->flags &= ~__EXEC_OBJECT_USERPTR_INIT;
-			i915_gem_object_userptr_submit_fini(vma->obj);
-		}
-
 		if (final)
 			i915_vma_put(vma);
 	}
@@ -1049,10 +1053,10 @@ static void reloc_cache_init(struct reloc_cache *cache,
 	cache->page = -1;
 	cache->vaddr = 0;
 	/* Must be a variable in the struct to allow GCC to unroll. */
-	cache->gen = INTEL_GEN(i915);
+	cache->graphics_ver = GRAPHICS_VER(i915);
 	cache->has_llc = HAS_LLC(i915);
 	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
-	cache->has_fence = cache->gen < 4;
+	cache->has_fence = cache->graphics_ver < 4;
 	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
 	cache->node.flags = 0;
 	reloc_cache_clear(cache);
@@ -1402,7 +1406,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 
 	err = eb->engine->emit_bb_start(rq,
 					batch->node.start, PAGE_SIZE,
-					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
+					cache->graphics_ver > 5 ? 0 : I915_DISPATCH_SECURE);
 	if (err)
 		goto skip_request;
 
@@ -1439,7 +1443,7 @@ err_pool:
 
 static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
 {
-	return engine->class != VIDEO_DECODE_CLASS || !IS_GEN(engine->i915, 6);
+	return engine->class != VIDEO_DECODE_CLASS || GRAPHICS_VER(engine->i915) != 6;
 }
 
 static u32 *reloc_gpu(struct i915_execbuffer *eb,
@@ -1481,7 +1485,7 @@ static inline bool use_reloc_gpu(struct i915_vma *vma)
 	if (DBG_FORCE_RELOC)
 		return false;
 
-	return !dma_resv_test_signaled_rcu(vma->resv, true);
+	return !dma_resv_test_signaled(vma->resv, true);
 }
 
 static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
@@ -1503,14 +1507,14 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
 			     u64 offset,
 			     u64 target_addr)
 {
-	const unsigned int gen = eb->reloc_cache.gen;
+	const unsigned int ver = eb->reloc_cache.graphics_ver;
 	unsigned int len;
 	u32 *batch;
 	u64 addr;
 
-	if (gen >= 8)
+	if (ver >= 8)
 		len = offset & 7 ? 8 : 5;
-	else if (gen >= 4)
+	else if (ver >= 4)
 		len = 4;
 	else
 		len = 3;
@@ -1522,7 +1526,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
 		return false;
 
 	addr = gen8_canonical_addr(vma->node.start + offset);
-	if (gen >= 8) {
+	if (ver >= 8) {
 		if (offset & 7) {
 			*batch++ = MI_STORE_DWORD_IMM_GEN4;
 			*batch++ = lower_32_bits(addr);
@@ -1542,7 +1546,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
 			*batch++ = lower_32_bits(target_addr);
 			*batch++ = upper_32_bits(target_addr);
 		}
-	} else if (gen >= 6) {
+	} else if (ver >= 6) {
 		*batch++ = MI_STORE_DWORD_IMM_GEN4;
 		*batch++ = 0;
 		*batch++ = addr;
@@ -1552,12 +1556,12 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
 		*batch++ = 0;
 		*batch++ = vma_phys_addr(vma, offset);
 		*batch++ = target_addr;
-	} else if (gen >= 4) {
+	} else if (ver >= 4) {
 		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 		*batch++ = 0;
 		*batch++ = addr;
 		*batch++ = target_addr;
-	} else if (gen >= 3 &&
+	} else if (ver >= 3 &&
 		   !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
 		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
 		*batch++ = addr;
@@ -1671,7 +1675,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
 		 * batchbuffers.
 		 */
 		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-		    IS_GEN(eb->i915, 6)) {
+		    GRAPHICS_VER(eb->i915) == 6) {
 			err = i915_vma_bind(target->vma,
 					    target->vma->obj->cache_level,
 					    PIN_GLOBAL, NULL);
@@ -1990,7 +1994,7 @@ repeat:
 	}
 
 	/* We may process another execbuffer during the unlock... */
-	eb_release_vmas(eb, false, true);
+	eb_release_vmas(eb, false);
 	i915_gem_ww_ctx_fini(&eb->ww);
 
 	if (rq) {
@@ -2059,9 +2063,7 @@ repeat_validate:
 
 	list_for_each_entry(ev, &eb->relocs, reloc_link) {
 		if (!have_copy) {
-			pagefault_disable();
 			err = eb_relocate_vma(eb, ev);
-			pagefault_enable();
 			if (err)
 				break;
 		} else {
@@ -2094,7 +2096,7 @@ repeat_validate:
 
 err:
 	if (err == -EDEADLK) {
-		eb_release_vmas(eb, false, false);
+		eb_release_vmas(eb, false);
 		err = i915_gem_ww_ctx_backoff(&eb->ww);
 		if (!err)
 			goto repeat_validate;
@@ -2191,7 +2193,7 @@ retry:
 
 err:
 	if (err == -EDEADLK) {
-		eb_release_vmas(eb, false, false);
+		eb_release_vmas(eb, false);
 		err = i915_gem_ww_ctx_backoff(&eb->ww);
 		if (!err)
 			goto retry;
@@ -2268,7 +2270,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 
 #ifdef CONFIG_MMU_NOTIFIER
 	if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
-		spin_lock(&eb->i915->mm.notifier_lock);
+		read_lock(&eb->i915->mm.notifier_lock);
 
 		/*
 		 * count is always at least 1, otherwise __EXEC_USERPTR_USED
@@ -2286,7 +2288,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 				break;
 		}
 
-		spin_unlock(&eb->i915->mm.notifier_lock);
+		read_unlock(&eb->i915->mm.notifier_lock);
 	}
 #endif
 
@@ -2332,7 +2334,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	u32 *cs;
 	int i;
 
-	if (!IS_GEN(rq->engine->i915, 7) || rq->engine->id != RCS0) {
+	if (GRAPHICS_VER(rq->engine->i915) != 7 || rq->engine->id != RCS0) {
 		drm_dbg(&rq->engine->i915->drm, "sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -3375,7 +3377,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
-		if (INTEL_GEN(i915) >= 11)
+		if (GRAPHICS_VER(i915) >= 11)
 			return -ENODEV;
 
 		/* Return -EPERM to trigger fallback code on old binaries. */
@@ -3435,7 +3437,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
 	err = eb_lookup_vmas(&eb);
 	if (err) {
-		eb_release_vmas(&eb, true, true);
+		eb_release_vmas(&eb, true);
 		goto err_engine;
 	}
 
@@ -3528,7 +3530,7 @@ err_request:
 	i915_request_put(eb.request);
 
 err_vma:
-	eb_release_vmas(&eb, true, true);
+	eb_release_vmas(&eb, true);
 	if (eb.trampoline)
 		i915_vma_unpin(eb.trampoline);
 	WARN_ON(err == -EDEADLK);
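A note on the notifier_lock hunks in eb_move_to_gpu() above: switching the submission-side critical section from spin_lock()/spin_unlock() to read_lock()/read_unlock() only works if i915->mm.notifier_lock has been converted to an rwlock_t, so several execbuf submitters can re-check their userptr objects concurrently while the MMU-notifier invalidation path takes the lock exclusively. The sketch below illustrates that reader/writer split only; the struct and function names are hypothetical stand-ins and are not part of this patch.

/*
 * Illustrative sketch of the reader/writer split assumed by the
 * read_lock() conversion above. Names here are hypothetical;
 * rwlock_init(&t->lock) is assumed to have been done at init time.
 */
#include <linux/spinlock.h>	/* rwlock_t, read_lock(), write_lock() */

struct userptr_tracker {
	rwlock_t lock;		/* plays the role of i915->mm.notifier_lock */
	bool invalidated;	/* set by the invalidation (writer) path */
};

/* Submission path: many readers may hold the lock concurrently. */
static int submit_recheck(struct userptr_tracker *t)
{
	int err = 0;

	read_lock(&t->lock);
	if (t->invalidated)
		err = -EAGAIN;	/* caller must back off and revalidate */
	read_unlock(&t->lock);

	return err;
}

/* MMU-notifier style invalidation: exclusive against all readers. */
static void invalidate_range(struct userptr_tracker *t)
{
	write_lock(&t->lock);
	t->invalidated = true;
	write_unlock(&t->lock);
}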