drm/i915/bdw: Support 64b relocations
author Ben Widawsky <benjamin.widawsky@intel.com>
Sun, 3 Nov 2013 04:07:11 +0000 (21:07 -0700)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 8 Nov 2013 17:09:41 +0000 (18:09 +0100)
We don't actually return any 64-bit offsets to userspace yet; however, we can
pretend like we do now so that userspace will support it when it happens.

This is just to please Chris, as the code itself isn't ready for relocations
beyond 32 bits (the upper 32 bits of each entry are always written as zero).

v2: Rebase on top of the refactored relocate_entry_gtt|cpu functions.

v3: Squash in fixup from Rafal Barbalho for 64-bit relocs using cpu
relocs and those crossing a page boundary.

v4: Squash in a fixup for the fixup from Rafael.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net> (v1)
Signed-off-by: Barbalho, Rafael <rafael.barbalho@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
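
The core of the change below is easiest to see in isolation: on gen8+ a
relocation entry is 8 bytes wide, the low 32 bits carry the presumed offset,
the high 32 bits are (for now) always written as zero, and because
kmap_atomic()/io_mapping_map_atomic_wc() map a single page at a time, the
second dword may require a remap when the entry straddles a page boundary.
The following standalone sketch mimics that logic with plain buffers;
FAKE_PAGE_SIZE, map_page() and write_reloc64() are illustrative stand-ins,
not i915 or kernel APIs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_PAGE_SIZE 4096

/* Two adjacent "pages" of a fake GEM object. */
static uint8_t fake_pages[2][FAKE_PAGE_SIZE];

/* Stand-in for kmap_atomic(): hands back the mapping of one page. */
static uint8_t *map_page(unsigned int page_index)
{
	return fake_pages[page_index];
}

/*
 * Write an 8-byte relocation at 'offset': low dword = delta, high dword = 0.
 * Mirrors the page-boundary handling added to relocate_entry_cpu(); assumes
 * the full 8 bytes fit within the fake object.
 */
static void write_reloc64(uint64_t offset, uint32_t delta)
{
	unsigned int page = offset / FAKE_PAGE_SIZE;
	unsigned int page_offset = offset % FAKE_PAGE_SIZE;
	uint8_t *vaddr = map_page(page);

	memcpy(vaddr + page_offset, &delta, sizeof(uint32_t));

	/* Advance by one dword; remap if that crosses into the next page. */
	page_offset = (page_offset + sizeof(uint32_t)) % FAKE_PAGE_SIZE;
	if (page_offset == 0)
		vaddr = map_page(page + 1);

	memset(vaddr + page_offset, 0, sizeof(uint32_t));
}

int main(void)
{
	/* A relocation whose 8 bytes straddle the page boundary. */
	write_reloc64(FAKE_PAGE_SIZE - sizeof(uint32_t), 0xdeadbeef);

	printf("low dword:  0x%08x\n",
	       *(uint32_t *)&fake_pages[0][FAKE_PAGE_SIZE - sizeof(uint32_t)]);
	printf("high dword: 0x%08x\n", *(uint32_t *)&fake_pages[1][0]);
	return 0;
}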
drivers/gpu/drm/i915/i915_gem_execbuffer.c

index 0ce0d47..78786c4 100644
@@ -212,6 +212,7 @@ static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
 {
+       struct drm_device *dev = obj->base.dev;
        uint32_t page_offset = offset_in_page(reloc->offset);
        char *vaddr;
        int ret = -EINVAL;
@@ -223,6 +224,19 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                                reloc->offset >> PAGE_SHIFT));
        *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+
+       if (INTEL_INFO(dev)->gen >= 8) {
+               page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+               if (page_offset == 0) {
+                       kunmap_atomic(vaddr);
+                       vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+                           (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+               }
+
+               *(uint32_t *)(vaddr + page_offset) = 0;
+       }
+
        kunmap_atomic(vaddr);
 
        return 0;
@@ -253,6 +267,21 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
        reloc_entry = (uint32_t __iomem *)
                (reloc_page + offset_in_page(reloc->offset));
        iowrite32(reloc->delta, reloc_entry);
+
+       if (INTEL_INFO(dev)->gen >= 8) {
+               reloc_entry += 1;
+
+               if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+                       io_mapping_unmap_atomic(reloc_page);
+                       reloc_page = io_mapping_map_atomic_wc(
+                                       dev_priv->gtt.mappable,
+                                       reloc->offset + sizeof(uint32_t));
+                       reloc_entry = reloc_page;
+               }
+
+               iowrite32(0, reloc_entry);
+       }
+
        io_mapping_unmap_atomic(reloc_page);
 
        return 0;
@@ -323,7 +352,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                return 0;
 
        /* Check that the relocation address is valid... */
-       if (unlikely(reloc->offset > obj->base.size - 4)) {
+       if (unlikely(reloc->offset >
+               obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
                DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
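
For the bounds check in the last hunk: a gen8+ relocation consumes 8 bytes
rather than 4, so the last valid offset moves back by a dword. A minimal
standalone restatement of that check, using hypothetical names (reloc_offset,
obj_size, gen) rather than the driver's structures:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical restatement of the relocation bounds check: a gen8+
 * relocation writes 8 bytes, earlier gens write 4, so the offset must
 * leave at least that much room at the end of the object.
 */
static bool reloc_offset_valid(uint64_t reloc_offset, uint64_t obj_size, int gen)
{
	uint64_t reloc_size = (gen >= 8) ? 8 : 4;

	return obj_size >= reloc_size && reloc_offset <= obj_size - reloc_size;
}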