static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
struct i915_address_space *vm;
- struct page *page;
u32 *vaddr;
int err = 0;
vm = ctx_vm(ctx);
if (!vm)
return -ENODEV;
- page = __px_page(vm->scratch[0]);
- if (!page) {
+ if (!vm->scratch[0]) {
pr_err("No scratch page!\n");
return -EINVAL;
}
- vaddr = kmap(page);
- if (!vaddr) {
- pr_err("No (mappable) scratch page!\n");
- return -EINVAL;
- }
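+ /* The scratch page is kept mapped (write-back) for the vm's lifetime. */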
+ vaddr = __px_vaddr(vm->scratch[0]);
memcpy(out, vaddr, sizeof(*out));
if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
pr_err("Inconsistent initial state of scratch page!\n");
err = -EINVAL;
}
- kunmap(page);
return err;
}
/*
* Note that the hw doesn't support removing PDE on the fly
* (they are cached inside the context with no means to
* invalidate the cache), so we can only reset the PTE
* entries back to scratch.
*/
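+ /* The PT was mapped when allocated, so just look up its kernel vaddr. */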
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
pte = 0;
}
GEM_BUG_ON(!pd->entry[act_pt]);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
do {
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
}
if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
- kunmap_atomic(vaddr);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
goto err_scratch0;
}
- ret = pin_pt_dma(vm, vm->scratch[1]);
+ ret = map_pt_dma(vm, vm->scratch[1]);
if (ret)
goto err_scratch1;
atomic_read(&pt->used));
GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset64(vaddr + gen8_pd_index(start, 0),
vm->scratch[0]->encode,
count);
- kunmap_atomic(vaddr);
atomic_sub(count, &pt->used);
start += count;
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
}
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
return idx;
}
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
} else {
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
maybe_64K = __gen8_pte_index(start, 1);
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
}
do {
} while (rem >= page_size && index < I915_PDES);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
* filled whole page-table with 64K entries, or filled part of
* it and have reached the end of the sg table and we have
* enough padding.
*/
if (maybe_64K != -1 &&
    (index == I915_PDES ||
     (i915_vm_has_scratch_64K(vma->vm) &&
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
* We write all 4K page entries, even when using 64K
* pages. In order to verify that the HW isn't cheating
* by using the 4K PTE instead of the 64K PTE, we want
* to remove all the surplus entries. If the HW skipped
* the 64K PTE, it will read/write into the scratch page
* instead - which we detect as missing results during
* selftests.
*/
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
encode = vma->vm->scratch[0]->encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+ vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- kunmap_atomic(vaddr);
}
}
if (IS_ERR(obj))
goto free_scratch;
- ret = pin_pt_dma(vm, obj);
+ ret = map_pt_dma(vm, obj);
if (ret) {
i915_gem_object_put(obj);
goto free_scratch;
if (IS_ERR(pde))
return PTR_ERR(pde);
- err = pin_pt_dma(vm, pde->pt.base);
+ err = map_pt_dma(vm, pde->pt.base);
if (err) {
free_pd(vm, pde);
return err;
goto err_pd;
}
- err = pin_pt_dma(vm, pd->pt.base);
+ err = map_pt_dma(vm, pd->pt.base);
if (err)
goto err_pd;
goto err_ppgtt;
i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
return obj;
}
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ void *vaddr;
- i915_gem_object_lock(obj, NULL);
- err = i915_gem_object_pin_pages(obj);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
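+ /*
+ * Pin the backing pages and create the permanent write-back kernel
+ * mapping in one step; pin_map_unlocked takes the object lock itself.
+ */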
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ void *vaddr;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
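+ /* The caller already holds the object lock, so use the locked pin_map. */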
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
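+/*
+ * Page-table objects are mapped write-back when first created;
+ * p->mm.mapping packs the map type into the pointer's low bits, so
+ * unpack (and discard) the type to recover the plain kernel vaddr.
+ */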
+void *__px_vaddr(struct drm_i915_gem_object *p)
+{
+ enum i915_map_type type;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return page_unpack_bits(p->mm.mapping, &type);
+}
+
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
- struct page *page = __px_page(p);
- void *vaddr;
+ void *vaddr = __px_vaddr(p);
- vaddr = kmap(page);
memset64(vaddr, val, count);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap(page);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
- struct sgt_iter sgt;
- struct page *page;
+ void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
- for_each_sgt_page(page, sgt, scratch->mm.pages) {
- void *vaddr;
-
- vaddr = kmap(page);
- memset(vaddr, val, PAGE_SIZE);
- kunmap(page);
- }
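+ /* A single contiguous kernel mapping now covers the whole object. */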
+ memset(vaddr, val, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
if (IS_ERR(obj))
goto skip;
- if (pin_pt_dma(vm, obj))
+ if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))
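+/* Kernel virtual address of the px's backing store, mapped at creation */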
+void *__px_vaddr(struct drm_i915_gem_object *p);
+#define px_vaddr(px) (__px_vaddr(px_base(px)))
+
#define px_pt(px) \
__px_choose_expr(px, struct i915_page_table *, __x, \
__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px)))
-
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 size);
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
const unsigned short idx,
const u64 encoded_entry)
{
- u64 * const vaddr = kmap_atomic(__px_page(pdma));
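+ /* The PD is permanently mapped; write the entry and flush that qword. */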
+ u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
clflush_cache_range(&vaddr[idx], sizeof(u64));
- kunmap_atomic(vaddr);
}
void
return 0;
}
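+/*
+ * Map every page table in the stash into the kernel so that
+ * allocate_va_range() can write PTEs without further mapping calls.
+ * Callers take the vm-wide object lock around this (see the
+ * i915_gem_object_lock(vm->scratch[0]) pattern above).
+ */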
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
struct i915_page_table *pt;
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma_locked(vm, pt->base);
+ err = map_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
if (err)
goto err_fence;
- err = i915_vm_pin_pt_stash(vma->vm,
- &work->stash);
+ err = i915_vm_map_pt_stash(vma->vm, &work->stash);
if (err)
goto err_fence;
}
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
BIT_ULL(size)))
goto alloc_vm_end;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash,
addr, BIT_ULL(size));
-
i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
if (err == -EDEADLK) {
if (err)
goto end_ww;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash, offset, chunk_size);
-
i915_vm_free_pt_stash(vm, &stash);
end_ww:
if (err == -EDEADLK) {
}
/* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
- scratch = kmap(__px_page(ce->vm->scratch[0]));
+ scratch = __px_vaddr(ce->vm->scratch[0]);
memset(scratch, POISON_FREE, PAGE_SIZE);
rq = intel_context_create_request(ce);
out_rq:
i915_request_put(rq);
out_ce:
- kunmap(__px_page(ce->vm->scratch[0]));
intel_context_put(ce);
out:
stream_destroy(stream);