if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
return NULL;
+ if (GEM_WARN_ON(type == I915_MAP_WC &&
+ !static_cpu_has(X86_FEATURE_PAT)))
+ return NULL;
+
/* A single page can always be kmapped */
- if (n_pte == 1 && type == I915_MAP_WB)
- return kmap(sg_page(sgt->sgl));
+ if (n_pte == 1 && type == I915_MAP_WB) {
+ struct page *page = sg_page(sgt->sgl);
+
+ /*
+ * On 32b, highmem uses a finite set of indirect PTEs (i.e.
+ * vmap) to provide virtual mappings of the high pages.
+ * As these are finite, map_new_virtual() must wait for some
+ * other kmap() to finish when it runs out. If we map a large
+ * number of objects, there is no method for it to tell us
+ * to release the mappings, and we deadlock.
+ *
+ * However, if we make an explicit vmap of the page, that
+ * uses a larger vmalloc arena, and also has the ability
+ * to tell us to release unwanted mappings. Most importantly,
+ * it will fail and propagate an error instead of waiting
+ * forever.
+ *
+ * So if the page is beyond the 32b boundary, make an explicit
+ * vmap. On 64b, this check will be optimised away as we can
+ * directly kmap any page on the system.
+ */
+ if (!PageHighMem(page))
+ return kmap(page);
+ }
mem = stack;
if (n_pte > ARRAY_SIZE(stack)) {
return nth_page(sg_page(sg), offset);
}
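For reference, a minimal sketch of the explicit-vmap idea described in the comment above, assuming a single highmem page and the generic kmap()/vmap() helpers from <linux/highmem.h> and <linux/vmalloc.h>; this is an illustration of the technique only, not the driver's actual mapping path:

#include <linux/highmem.h>
#include <linux/vmalloc.h>

/*
 * Illustrative only: map one backing page, preferring kmap() for lowmem
 * and an explicit vmap() for highmem, so that exhausting the mapping
 * space returns NULL instead of sleeping in map_new_virtual().
 */
static void *map_single_page(struct page *page)
{
	if (!PageHighMem(page))
		return kmap(page);

	/* Draws from the larger vmalloc arena; fails instead of waiting. */
	return vmap(&page, 1, 0, PAGE_KERNEL);
}

static void unmap_single_page(struct page *page, void *addr)
{
	if (!PageHighMem(page))
		kunmap(page);
	else
		vunmap(addr);
}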
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
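A brief sketch of how a caller might use the new helper, assuming the object's backing pages are already pinned; poke_object_byte() and its parameters are hypothetical and not part of this patch:

/*
 * Hypothetical caller: write one byte into an object's backing store and
 * rely on i915_gem_object_get_dirty_page() marking the page dirty so the
 * change is written back when the pages are released.
 */
static void poke_object_byte(struct drm_i915_gem_object *obj,
			     unsigned long offset, u8 value)
{
	struct page *page;
	void *vaddr;

	page = i915_gem_object_get_dirty_page(obj, offset >> PAGE_SHIFT);
	vaddr = kmap(page);
	*((u8 *)vaddr + offset_in_page(offset)) = value;
	kunmap(page);
}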
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,