1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_dmabuf.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include <linux/reservation.h>
39 #include <linux/shmem_fs.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/pci.h>
43 #include <linux/dma-buf.h>
44
45 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
46 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
47
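/*
 * CPU reads are coherent with GPU access when the platform shares its
 * last-level cache with the GPU, or when the object uses a snooped
 * (non-NONE) cache level.
 */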
48 static bool cpu_cache_is_coherent(struct drm_device *dev,
49                                   enum i915_cache_level level)
50 {
51         return HAS_LLC(dev) || level != I915_CACHE_NONE;
52 }
53
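/*
 * Writes via the CPU need a clflush when the object is not already in the
 * CPU write domain and either its cache level is not coherent or it is
 * pinned for display (scanout).
 */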
54 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
55 {
56         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
57                 return false;
58
59         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
60                 return true;
61
62         return obj->pin_display;
63 }
64
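/*
 * Reserve a node in the mappable portion of the global GTT so that pages
 * can be mapped through the aperture one at a time; used by the GTT
 * pread/pwrite fallback paths below.
 */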
65 static int
66 insert_mappable_node(struct drm_i915_private *i915,
67                      struct drm_mm_node *node, u32 size)
68 {
69         memset(node, 0, sizeof(*node));
70         return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
71                                                    size, 0, 0, 0,
72                                                    i915->ggtt.mappable_end,
73                                                    DRM_MM_SEARCH_DEFAULT,
74                                                    DRM_MM_CREATE_DEFAULT);
75 }
76
77 static void
78 remove_mappable_node(struct drm_mm_node *node)
79 {
80         drm_mm_remove_node(node);
81 }
82
83 /* some bookkeeping */
84 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
85                                   u64 size)
86 {
87         spin_lock(&dev_priv->mm.object_stat_lock);
88         dev_priv->mm.object_count++;
89         dev_priv->mm.object_memory += size;
90         spin_unlock(&dev_priv->mm.object_stat_lock);
91 }
92
93 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
94                                      u64 size)
95 {
96         spin_lock(&dev_priv->mm.object_stat_lock);
97         dev_priv->mm.object_count--;
98         dev_priv->mm.object_memory -= size;
99         spin_unlock(&dev_priv->mm.object_stat_lock);
100 }
101
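/*
 * If a GPU reset is in progress, wait (interruptibly, for up to 10 seconds)
 * for it to complete before letting the caller go on to take struct_mutex.
 */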
102 static int
103 i915_gem_wait_for_error(struct i915_gpu_error *error)
104 {
105         int ret;
106
107         if (!i915_reset_in_progress(error))
108                 return 0;
109
110         /*
111          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
112          * userspace. If it takes that long something really bad is going on and
113          * we should simply try to bail out and fail as gracefully as possible.
114          */
115         ret = wait_event_interruptible_timeout(error->reset_queue,
116                                                !i915_reset_in_progress(error),
117                                                10*HZ);
118         if (ret == 0) {
119                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
120                 return -EIO;
121         } else if (ret < 0) {
122                 return ret;
123         } else {
124                 return 0;
125         }
126 }
127
128 int i915_mutex_lock_interruptible(struct drm_device *dev)
129 {
130         struct drm_i915_private *dev_priv = to_i915(dev);
131         int ret;
132
133         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
134         if (ret)
135                 return ret;
136
137         ret = mutex_lock_interruptible(&dev->struct_mutex);
138         if (ret)
139                 return ret;
140
141         return 0;
142 }
143
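/*
 * Report the total size of the global GTT and an estimate of the space that
 * is still available, i.e. not consumed by currently pinned vma.
 */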
144 int
145 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
146                             struct drm_file *file)
147 {
148         struct drm_i915_private *dev_priv = to_i915(dev);
149         struct i915_ggtt *ggtt = &dev_priv->ggtt;
150         struct drm_i915_gem_get_aperture *args = data;
151         struct i915_vma *vma;
152         size_t pinned;
153
154         pinned = 0;
155         mutex_lock(&dev->struct_mutex);
156         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
157                 if (i915_vma_is_pinned(vma))
158                         pinned += vma->node.size;
159         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
160                 if (i915_vma_is_pinned(vma))
161                         pinned += vma->node.size;
162         mutex_unlock(&dev->struct_mutex);
163
164         args->aper_size = ggtt->base.total;
165         args->aper_available_size = args->aper_size - pinned;
166
167         return 0;
168 }
169
170 static int
171 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
172 {
173         struct address_space *mapping = obj->base.filp->f_mapping;
174         char *vaddr = obj->phys_handle->vaddr;
175         struct sg_table *st;
176         struct scatterlist *sg;
177         int i;
178
179         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
180                 return -EINVAL;
181
182         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
183                 struct page *page;
184                 char *src;
185
186                 page = shmem_read_mapping_page(mapping, i);
187                 if (IS_ERR(page))
188                         return PTR_ERR(page);
189
190                 src = kmap_atomic(page);
191                 memcpy(vaddr, src, PAGE_SIZE);
192                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
193                 kunmap_atomic(src);
194
195                 put_page(page);
196                 vaddr += PAGE_SIZE;
197         }
198
199         i915_gem_chipset_flush(to_i915(obj->base.dev));
200
201         st = kmalloc(sizeof(*st), GFP_KERNEL);
202         if (st == NULL)
203                 return -ENOMEM;
204
205         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
206                 kfree(st);
207                 return -ENOMEM;
208         }
209
210         sg = st->sgl;
211         sg->offset = 0;
212         sg->length = obj->base.size;
213
214         sg_dma_address(sg) = obj->phys_handle->busaddr;
215         sg_dma_len(sg) = obj->base.size;
216
217         obj->pages = st;
218         return 0;
219 }
220
221 static void
222 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
223 {
224         int ret;
225
226         BUG_ON(obj->madv == __I915_MADV_PURGED);
227
228         ret = i915_gem_object_set_to_cpu_domain(obj, true);
229         if (WARN_ON(ret)) {
230                 /* In the event of a disaster, abandon all caches and
231                  * hope for the best.
232                  */
233                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
234         }
235
236         if (obj->madv == I915_MADV_DONTNEED)
237                 obj->dirty = 0;
238
239         if (obj->dirty) {
240                 struct address_space *mapping = obj->base.filp->f_mapping;
241                 char *vaddr = obj->phys_handle->vaddr;
242                 int i;
243
244                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
245                         struct page *page;
246                         char *dst;
247
248                         page = shmem_read_mapping_page(mapping, i);
249                         if (IS_ERR(page))
250                                 continue;
251
252                         dst = kmap_atomic(page);
253                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
254                         memcpy(dst, vaddr, PAGE_SIZE);
255                         kunmap_atomic(dst);
256
257                         set_page_dirty(page);
258                         if (obj->madv == I915_MADV_WILLNEED)
259                                 mark_page_accessed(page);
260                         put_page(page);
261                         vaddr += PAGE_SIZE;
262                 }
263                 obj->dirty = 0;
264         }
265
266         sg_free_table(obj->pages);
267         kfree(obj->pages);
268 }
269
270 static void
271 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
272 {
273         drm_pci_free(obj->base.dev, obj->phys_handle);
274 }
275
276 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
277         .get_pages = i915_gem_object_get_pages_phys,
278         .put_pages = i915_gem_object_put_pages_phys,
279         .release = i915_gem_object_release_phys,
280 };
281
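/*
 * Unbind every vma belonging to the object from its address space. Rendering
 * is flushed and requests are retired first so that bindings kept alive only
 * by closed-but-still-active vma can be released as well.
 */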
282 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
283 {
284         struct i915_vma *vma;
285         LIST_HEAD(still_in_list);
286         int ret;
287
288         lockdep_assert_held(&obj->base.dev->struct_mutex);
289
290         /* Closed vma are removed from the obj->vma_list - but they may
291          * still have an active binding on the object. To remove those we
292          * must wait for all rendering on the object to complete (as unbinding
293          * must do anyway), and then retire the requests.
294          */
295         ret = i915_gem_object_wait_rendering(obj, false);
296         if (ret)
297                 return ret;
298
299         i915_gem_retire_requests(to_i915(obj->base.dev));
300
301         while ((vma = list_first_entry_or_null(&obj->vma_list,
302                                                struct i915_vma,
303                                                obj_link))) {
304                 list_move_tail(&vma->obj_link, &still_in_list);
305                 ret = i915_vma_unbind(vma);
306                 if (ret)
307                         break;
308         }
309         list_splice(&still_in_list, &obj->vma_list);
310
311         return ret;
312 }
313
314 /**
315  * i915_gem_object_wait_rendering - wait for rendering to the object to
316  * complete so it is safe to unbind from the GTT or access from the CPU.
317  * @obj: i915 gem object
318  * @readonly: waiting for just read access or read-write access
319  */
320 int
321 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
322                                bool readonly)
323 {
324         struct reservation_object *resv;
325         struct i915_gem_active *active;
326         unsigned long active_mask;
327         int idx;
328
329         lockdep_assert_held(&obj->base.dev->struct_mutex);
330
331         if (!readonly) {
332                 active = obj->last_read;
333                 active_mask = i915_gem_object_get_active(obj);
334         } else {
335                 active_mask = 1;
336                 active = &obj->last_write;
337         }
338
339         for_each_active(active_mask, idx) {
340                 int ret;
341
342                 ret = i915_gem_active_wait(&active[idx],
343                                            &obj->base.dev->struct_mutex);
344                 if (ret)
345                         return ret;
346         }
347
348         resv = i915_gem_object_get_dmabuf_resv(obj);
349         if (resv) {
350                 long err;
351
352                 err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
353                                                           MAX_SCHEDULE_TIMEOUT);
354                 if (err < 0)
355                         return err;
356         }
357
358         return 0;
359 }
360
361 /* A nonblocking variant of the above wait. Must be called prior to
362  * acquiring the mutex for the object, as the object state may change
363  * during this call. The caller must hold a reference to the object.
364  */
365 static __must_check int
366 __unsafe_wait_rendering(struct drm_i915_gem_object *obj,
367                         struct intel_rps_client *rps,
368                         bool readonly)
369 {
370         struct i915_gem_active *active;
371         unsigned long active_mask;
372         int idx;
373
374         active_mask = __I915_BO_ACTIVE(obj);
375         if (!active_mask)
376                 return 0;
377
378         if (!readonly) {
379                 active = obj->last_read;
380         } else {
381                 active_mask = 1;
382                 active = &obj->last_write;
383         }
384
385         for_each_active(active_mask, idx) {
386                 int ret;
387
388                 ret = i915_gem_active_wait_unlocked(&active[idx],
389                                                     I915_WAIT_INTERRUPTIBLE,
390                                                     NULL, rps);
391                 if (ret)
392                         return ret;
393         }
394
395         return 0;
396 }
397
398 static struct intel_rps_client *to_rps_client(struct drm_file *file)
399 {
400         struct drm_i915_file_private *fpriv = file->driver_priv;
401
402         return &fpriv->rps;
403 }
404
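/*
 * Replace the object's shmemfs backing store with a contiguous DMA allocation
 * (the legacy "phys object" path, e.g. for cursors on very old hardware):
 * existing GTT bindings and pages are dropped, and the object switches over
 * to the phys object ops.
 */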
405 int
406 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
407                             int align)
408 {
409         drm_dma_handle_t *phys;
410         int ret;
411
412         if (obj->phys_handle) {
413                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
414                         return -EBUSY;
415
416                 return 0;
417         }
418
419         if (obj->madv != I915_MADV_WILLNEED)
420                 return -EFAULT;
421
422         if (obj->base.filp == NULL)
423                 return -EINVAL;
424
425         ret = i915_gem_object_unbind(obj);
426         if (ret)
427                 return ret;
428
429         ret = i915_gem_object_put_pages(obj);
430         if (ret)
431                 return ret;
432
433         /* create a new object */
434         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
435         if (!phys)
436                 return -ENOMEM;
437
438         obj->phys_handle = phys;
439         obj->ops = &i915_gem_phys_ops;
440
441         return i915_gem_object_get_pages(obj);
442 }
443
444 static int
445 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
446                      struct drm_i915_gem_pwrite *args,
447                      struct drm_file *file_priv)
448 {
449         struct drm_device *dev = obj->base.dev;
450         void *vaddr = obj->phys_handle->vaddr + args->offset;
451         char __user *user_data = u64_to_user_ptr(args->data_ptr);
452         int ret = 0;
453
454         /* We manually control the domain here and pretend that it
455          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
456          */
457         ret = i915_gem_object_wait_rendering(obj, false);
458         if (ret)
459                 return ret;
460
461         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
462         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
463                 unsigned long unwritten;
464
465                 /* The physical object once assigned is fixed for the lifetime
466                  * of the obj, so we can safely drop the lock and continue
467                  * to access vaddr.
468                  */
469                 mutex_unlock(&dev->struct_mutex);
470                 unwritten = copy_from_user(vaddr, user_data, args->size);
471                 mutex_lock(&dev->struct_mutex);
472                 if (unwritten) {
473                         ret = -EFAULT;
474                         goto out;
475                 }
476         }
477
478         drm_clflush_virt_range(vaddr, args->size);
479         i915_gem_chipset_flush(to_i915(dev));
480
481 out:
482         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
483         return ret;
484 }
485
486 void *i915_gem_object_alloc(struct drm_device *dev)
487 {
488         struct drm_i915_private *dev_priv = to_i915(dev);
489         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
490 }
491
492 void i915_gem_object_free(struct drm_i915_gem_object *obj)
493 {
494         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
495         kmem_cache_free(dev_priv->objects, obj);
496 }
497
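/*
 * Common backend for the create and dumb_create ioctls: round the size up to
 * a whole number of pages, allocate the object and return a new handle.
 */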
498 static int
499 i915_gem_create(struct drm_file *file,
500                 struct drm_device *dev,
501                 uint64_t size,
502                 uint32_t *handle_p)
503 {
504         struct drm_i915_gem_object *obj;
505         int ret;
506         u32 handle;
507
508         size = roundup(size, PAGE_SIZE);
509         if (size == 0)
510                 return -EINVAL;
511
512         /* Allocate the new object */
513         obj = i915_gem_object_create(dev, size);
514         if (IS_ERR(obj))
515                 return PTR_ERR(obj);
516
517         ret = drm_gem_handle_create(file, &obj->base, &handle);
518         /* drop reference from allocate - handle holds it now */
519         i915_gem_object_put_unlocked(obj);
520         if (ret)
521                 return ret;
522
523         *handle_p = handle;
524         return 0;
525 }
526
527 int
528 i915_gem_dumb_create(struct drm_file *file,
529                      struct drm_device *dev,
530                      struct drm_mode_create_dumb *args)
531 {
532         /* have to work out size/pitch and return them */
533         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
534         args->size = args->pitch * args->height;
535         return i915_gem_create(file, dev,
536                                args->size, &args->handle);
537 }
538
539 /**
540  * i915_gem_create_ioctl - create a new mm object and return a handle to it.
541  * @dev: drm device pointer
542  * @data: ioctl data blob
543  * @file: drm file pointer
544  */
545 int
546 i915_gem_create_ioctl(struct drm_device *dev, void *data,
547                       struct drm_file *file)
548 {
549         struct drm_i915_gem_create *args = data;
550
551         return i915_gem_create(file, dev,
552                                args->size, &args->handle);
553 }
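
/*
 * Example (not part of this file): a minimal userspace sketch of the create
 * ioctl above, assuming libdrm's drmIoctl() and the uapi definitions from
 * include/uapi/drm/i915_drm.h; gem_create() is a hypothetical helper and fd
 * is assumed to be an open i915 DRM file descriptor.
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int gem_create(int fd, uint64_t size, uint32_t *handle)
 *	{
 *		struct drm_i915_gem_create create = { .size = size };
 *
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *			return -errno;
 *
 *		*handle = create.handle;	// handle is valid for this fd only
 *		return 0;
 *	}
 */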
554
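/*
 * The swizzled copy helpers below transfer data in cacheline-sized chunks and
 * flip bit 6 of the object offset (the "^ 64"), undoing the bit-17 address
 * swizzling that the memory controller applies to such pages.
 */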
555 static inline int
556 __copy_to_user_swizzled(char __user *cpu_vaddr,
557                         const char *gpu_vaddr, int gpu_offset,
558                         int length)
559 {
560         int ret, cpu_offset = 0;
561
562         while (length > 0) {
563                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
564                 int this_length = min(cacheline_end - gpu_offset, length);
565                 int swizzled_gpu_offset = gpu_offset ^ 64;
566
567                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
568                                      gpu_vaddr + swizzled_gpu_offset,
569                                      this_length);
570                 if (ret)
571                         return ret + length;
572
573                 cpu_offset += this_length;
574                 gpu_offset += this_length;
575                 length -= this_length;
576         }
577
578         return 0;
579 }
580
581 static inline int
582 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
583                           const char __user *cpu_vaddr,
584                           int length)
585 {
586         int ret, cpu_offset = 0;
587
588         while (length > 0) {
589                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
590                 int this_length = min(cacheline_end - gpu_offset, length);
591                 int swizzled_gpu_offset = gpu_offset ^ 64;
592
593                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
594                                        cpu_vaddr + cpu_offset,
595                                        this_length);
596                 if (ret)
597                         return ret + length;
598
599                 cpu_offset += this_length;
600                 gpu_offset += this_length;
601                 length -= this_length;
602         }
603
604         return 0;
605 }
606
607 /*
608  * Pins the specified object's pages and synchronizes the object with
609  * GPU accesses. Sets needs_clflush to non-zero if the caller should
610  * flush the object from the CPU cache.
611  */
612 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
613                                     unsigned int *needs_clflush)
614 {
615         int ret;
616
617         *needs_clflush = 0;
618
619         if (!i915_gem_object_has_struct_page(obj))
620                 return -ENODEV;
621
622         ret = i915_gem_object_wait_rendering(obj, true);
623         if (ret)
624                 return ret;
625
626         ret = i915_gem_object_get_pages(obj);
627         if (ret)
628                 return ret;
629
630         i915_gem_object_pin_pages(obj);
631
632         i915_gem_object_flush_gtt_write_domain(obj);
633
634         /* If we're not in the cpu read domain, set ourselves into the gtt
635          * read domain and manually flush cachelines (if required). This
636          * optimizes for the case when the gpu will dirty the data
637          * again anyway before the next pread happens.
638          */
639         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
640                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
641                                                         obj->cache_level);
642
643         if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
644                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
645                 if (ret)
646                         goto err_unpin;
647
648                 *needs_clflush = 0;
649         }
650
651         /* return with the pages pinned */
652         return 0;
653
654 err_unpin:
655         i915_gem_object_unpin_pages(obj);
656         return ret;
657 }
658
659 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
660                                      unsigned int *needs_clflush)
661 {
662         int ret;
663
664         *needs_clflush = 0;
665         if (!i915_gem_object_has_struct_page(obj))
666                 return -ENODEV;
667
668         ret = i915_gem_object_wait_rendering(obj, false);
669         if (ret)
670                 return ret;
671
672         ret = i915_gem_object_get_pages(obj);
673         if (ret)
674                 return ret;
675
676         i915_gem_object_pin_pages(obj);
677
678         i915_gem_object_flush_gtt_write_domain(obj);
679
680         /* If we're not in the cpu write domain, set ourselves into the
681          * gtt write domain and manually flush cachelines (as required).
682          * This optimizes for the case when the gpu will use the data
683          * right away and we therefore have to clflush anyway.
684          */
685         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
686                 *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
687
688         /* The same trick applies to invalidating partially written cachelines
689          * read before writing.
690          */
691         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
692                 *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
693                                                          obj->cache_level);
694
695         if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
696                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
697                 if (ret)
698                         goto err_unpin;
699
700                 *needs_clflush = 0;
701         }
702
703         if ((*needs_clflush & CLFLUSH_AFTER) == 0)
704                 obj->cache_dirty = true;
705
706         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
707         obj->dirty = 1;
708         /* return with the pages pinned */
709         return 0;
710
711 err_unpin:
712         i915_gem_object_unpin_pages(obj);
713         return ret;
714 }
715
716 /* Per-page copy function for the shmem pread fastpath.
717  * Flushes invalid cachelines before reading the target if
718  * needs_clflush is set. */
719 static int
720 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
721                  char __user *user_data,
722                  bool page_do_bit17_swizzling, bool needs_clflush)
723 {
724         char *vaddr;
725         int ret;
726
727         if (unlikely(page_do_bit17_swizzling))
728                 return -EINVAL;
729
730         vaddr = kmap_atomic(page);
731         if (needs_clflush)
732                 drm_clflush_virt_range(vaddr + shmem_page_offset,
733                                        page_length);
734         ret = __copy_to_user_inatomic(user_data,
735                                       vaddr + shmem_page_offset,
736                                       page_length);
737         kunmap_atomic(vaddr);
738
739         return ret ? -EFAULT : 0;
740 }
741
742 static void
743 shmem_clflush_swizzled_range(char *addr, unsigned long length,
744                              bool swizzled)
745 {
746         if (unlikely(swizzled)) {
747                 unsigned long start = (unsigned long) addr;
748                 unsigned long end = (unsigned long) addr + length;
749
750                 /* For swizzling simply ensure that we always flush both
751                  * channels. Lame, but simple and it works. Swizzled
752                  * pwrite/pread is far from a hotpath - current userspace
753                  * doesn't use it at all. */
754                 start = round_down(start, 128);
755                 end = round_up(end, 128);
756
757                 drm_clflush_virt_range((void *)start, end - start);
758         } else {
759                 drm_clflush_virt_range(addr, length);
760         }
761
762 }
763
764 /* Only difference to the fast-path function is that this can handle bit17
765  * and uses non-atomic copy and kmap functions. */
766 static int
767 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
768                  char __user *user_data,
769                  bool page_do_bit17_swizzling, bool needs_clflush)
770 {
771         char *vaddr;
772         int ret;
773
774         vaddr = kmap(page);
775         if (needs_clflush)
776                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
777                                              page_length,
778                                              page_do_bit17_swizzling);
779
780         if (page_do_bit17_swizzling)
781                 ret = __copy_to_user_swizzled(user_data,
782                                               vaddr, shmem_page_offset,
783                                               page_length);
784         else
785                 ret = __copy_to_user(user_data,
786                                      vaddr + shmem_page_offset,
787                                      page_length);
788         kunmap(page);
789
790         return ret ? -EFAULT : 0;
791 }
792
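/*
 * Map a single GTT page write-combined and copy to or from user memory using
 * the regular, faulting copy routines; callers must drop struct_mutex first
 * as the access may fault.
 */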
793 static inline unsigned long
794 slow_user_access(struct io_mapping *mapping,
795                  uint64_t page_base, int page_offset,
796                  char __user *user_data,
797                  unsigned long length, bool pwrite)
798 {
799         void __iomem *ioaddr;
800         void *vaddr;
801         uint64_t unwritten;
802
803         ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
804         /* We can use the cpu mem copy function because this is X86. */
805         vaddr = (void __force *)ioaddr + page_offset;
806         if (pwrite)
807                 unwritten = __copy_from_user(vaddr, user_data, length);
808         else
809                 unwritten = __copy_to_user(user_data, vaddr, length);
810
811         io_mapping_unmap(ioaddr);
812         return unwritten;
813 }
814
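/*
 * Fallback pread path through the GGTT aperture, used when the object has no
 * struct pages or the shmem path faults: pin the object into the mappable
 * aperture (or feed single pages through a temporary GTT node) and copy out
 * to userspace with struct_mutex dropped.
 */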
815 static int
816 i915_gem_gtt_pread(struct drm_device *dev,
817                    struct drm_i915_gem_object *obj, uint64_t size,
818                    uint64_t data_offset, uint64_t data_ptr)
819 {
820         struct drm_i915_private *dev_priv = to_i915(dev);
821         struct i915_ggtt *ggtt = &dev_priv->ggtt;
822         struct i915_vma *vma;
823         struct drm_mm_node node;
824         char __user *user_data;
825         uint64_t remain;
826         uint64_t offset;
827         int ret;
828
829         intel_runtime_pm_get(to_i915(dev));
830         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
831         if (!IS_ERR(vma)) {
832                 node.start = i915_ggtt_offset(vma);
833                 node.allocated = false;
834                 ret = i915_vma_put_fence(vma);
835                 if (ret) {
836                         i915_vma_unpin(vma);
837                         vma = ERR_PTR(ret);
838                 }
839         }
840         if (IS_ERR(vma)) {
841                 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
842                 if (ret)
843                         goto out;
844
845                 ret = i915_gem_object_get_pages(obj);
846                 if (ret) {
847                         remove_mappable_node(&node);
848                         goto out;
849                 }
850
851                 i915_gem_object_pin_pages(obj);
852         }
853
854         ret = i915_gem_object_set_to_gtt_domain(obj, false);
855         if (ret)
856                 goto out_unpin;
857
858         user_data = u64_to_user_ptr(data_ptr);
859         remain = size;
860         offset = data_offset;
861
862         mutex_unlock(&dev->struct_mutex);
863         if (likely(!i915.prefault_disable)) {
864                 ret = fault_in_pages_writeable(user_data, remain);
865                 if (ret) {
866                         mutex_lock(&dev->struct_mutex);
867                         goto out_unpin;
868                 }
869         }
870
871         while (remain > 0) {
872                 /* Operation in this page
873                  *
874                  * page_base = page offset within aperture
875                  * page_offset = offset within page
876                  * page_length = bytes to copy for this page
877                  */
878                 u32 page_base = node.start;
879                 unsigned page_offset = offset_in_page(offset);
880                 unsigned page_length = PAGE_SIZE - page_offset;
881                 page_length = remain < page_length ? remain : page_length;
882                 if (node.allocated) {
883                         wmb();
884                         ggtt->base.insert_page(&ggtt->base,
885                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
886                                                node.start,
887                                                I915_CACHE_NONE, 0);
888                         wmb();
889                 } else {
890                         page_base += offset & PAGE_MASK;
891                 }
892                 /* This is a slow read/write as it tries to read from
893                  * and write to user memory, which may result in page
894                  * faults, and so we cannot perform this under struct_mutex.
895                  */
896                 if (slow_user_access(&ggtt->mappable, page_base,
897                                      page_offset, user_data,
898                                      page_length, false)) {
899                         ret = -EFAULT;
900                         break;
901                 }
902
903                 remain -= page_length;
904                 user_data += page_length;
905                 offset += page_length;
906         }
907
908         mutex_lock(&dev->struct_mutex);
909         if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
910                 /* The user has modified the object whilst we tried
911                  * reading from it, and we now have no idea what domain
912                  * the pages should be in. As we have just been touching
913                  * them directly, flush everything back to the GTT
914                  * domain.
915                  */
916                 ret = i915_gem_object_set_to_gtt_domain(obj, false);
917         }
918
919 out_unpin:
920         if (node.allocated) {
921                 wmb();
922                 ggtt->base.clear_range(&ggtt->base,
923                                        node.start, node.size);
924                 i915_gem_object_unpin_pages(obj);
925                 remove_mappable_node(&node);
926         } else {
927                 i915_vma_unpin(vma);
928         }
929 out:
930         intel_runtime_pm_put(to_i915(dev));
931         return ret;
932 }
933
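/*
 * Pread via the object's shmemfs pages: try the atomic kmap fast path per
 * page first and, on failure, drop struct_mutex and fall back to the slow
 * path that can handle faults and bit-17 swizzling.
 */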
934 static int
935 i915_gem_shmem_pread(struct drm_device *dev,
936                      struct drm_i915_gem_object *obj,
937                      struct drm_i915_gem_pread *args,
938                      struct drm_file *file)
939 {
940         char __user *user_data;
941         ssize_t remain;
942         loff_t offset;
943         int shmem_page_offset, page_length, ret = 0;
944         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
945         int prefaulted = 0;
946         int needs_clflush = 0;
947         struct sg_page_iter sg_iter;
948
949         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
950         if (ret)
951                 return ret;
952
953         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
954         user_data = u64_to_user_ptr(args->data_ptr);
955         offset = args->offset;
956         remain = args->size;
957
958         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
959                          offset >> PAGE_SHIFT) {
960                 struct page *page = sg_page_iter_page(&sg_iter);
961
962                 if (remain <= 0)
963                         break;
964
965                 /* Operation in this page
966                  *
967                  * shmem_page_offset = offset within page in shmem file
968                  * page_length = bytes to copy for this page
969                  */
970                 shmem_page_offset = offset_in_page(offset);
971                 page_length = remain;
972                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
973                         page_length = PAGE_SIZE - shmem_page_offset;
974
975                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
976                         (page_to_phys(page) & (1 << 17)) != 0;
977
978                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
979                                        user_data, page_do_bit17_swizzling,
980                                        needs_clflush);
981                 if (ret == 0)
982                         goto next_page;
983
984                 mutex_unlock(&dev->struct_mutex);
985
986                 if (likely(!i915.prefault_disable) && !prefaulted) {
987                         ret = fault_in_pages_writeable(user_data, remain);
988                         /* Userspace is tricking us, but we've already clobbered
989                          * its pages with the prefault and promised to write the
990                          * data up to the first fault. Hence ignore any errors
991                          * and just continue. */
992                         (void)ret;
993                         prefaulted = 1;
994                 }
995
996                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
997                                        user_data, page_do_bit17_swizzling,
998                                        needs_clflush);
999
1000                 mutex_lock(&dev->struct_mutex);
1001
1002                 if (ret)
1003                         goto out;
1004
1005 next_page:
1006                 remain -= page_length;
1007                 user_data += page_length;
1008                 offset += page_length;
1009         }
1010
1011 out:
1012         i915_gem_obj_finish_shmem_access(obj);
1013
1014         return ret;
1015 }
1016
1017 /**
1018  * i915_gem_pread_ioctl - read data from the object referenced by handle.
1019  * @dev: drm device pointer
1020  * @data: ioctl data blob
1021  * @file: drm file pointer
1022  *
1023  * On error, the contents of *data are undefined.
1024  */
1025 int
1026 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1027                      struct drm_file *file)
1028 {
1029         struct drm_i915_gem_pread *args = data;
1030         struct drm_i915_gem_object *obj;
1031         int ret = 0;
1032
1033         if (args->size == 0)
1034                 return 0;
1035
1036         if (!access_ok(VERIFY_WRITE,
1037                        u64_to_user_ptr(args->data_ptr),
1038                        args->size))
1039                 return -EFAULT;
1040
1041         obj = i915_gem_object_lookup(file, args->handle);
1042         if (!obj)
1043                 return -ENOENT;
1044
1045         /* Bounds check source.  */
1046         if (args->offset > obj->base.size ||
1047             args->size > obj->base.size - args->offset) {
1048                 ret = -EINVAL;
1049                 goto err;
1050         }
1051
1052         trace_i915_gem_object_pread(obj, args->offset, args->size);
1053
1054         ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
1055         if (ret)
1056                 goto err;
1057
1058         ret = i915_mutex_lock_interruptible(dev);
1059         if (ret)
1060                 goto err;
1061
1062         ret = i915_gem_shmem_pread(dev, obj, args, file);
1063
1064         /* pread for non shmem backed objects */
1065         if (ret == -EFAULT || ret == -ENODEV)
1066                 ret = i915_gem_gtt_pread(dev, obj, args->size,
1067                                         args->offset, args->data_ptr);
1068
1069         i915_gem_object_put(obj);
1070         mutex_unlock(&dev->struct_mutex);
1071
1072         return ret;
1073
1074 err:
1075         i915_gem_object_put_unlocked(obj);
1076         return ret;
1077 }
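
/*
 * Example (not part of this file): a minimal userspace sketch of the pread
 * ioctl above, assuming libdrm's drmIoctl() and the uapi definitions from
 * include/uapi/drm/i915_drm.h; gem_pread() is a hypothetical helper.
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int gem_pread(int fd, uint32_t handle, uint64_t offset,
 *			     void *dst, uint64_t size)
 *	{
 *		struct drm_i915_gem_pread pread = {
 *			.handle = handle,
 *			.offset = offset,
 *			.size = size,
 *			.data_ptr = (uintptr_t)dst,
 *		};
 *
 *		return drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread) ? -errno : 0;
 *	}
 */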
1078
1079 /* This is the fast write path which cannot handle
1080  * page faults in the source data
1081  */
1082
1083 static inline int
1084 fast_user_write(struct io_mapping *mapping,
1085                 loff_t page_base, int page_offset,
1086                 char __user *user_data,
1087                 int length)
1088 {
1089         void __iomem *vaddr_atomic;
1090         void *vaddr;
1091         unsigned long unwritten;
1092
1093         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
1094         /* We can use the cpu mem copy function because this is X86. */
1095         vaddr = (void __force *)vaddr_atomic + page_offset;
1096         unwritten = __copy_from_user_inatomic_nocache(vaddr,
1097                                                       user_data, length);
1098         io_mapping_unmap_atomic(vaddr_atomic);
1099         return unwritten;
1100 }
1101
1102 /**
1103  * i915_gem_gtt_pwrite_fast - the fast pwrite path, where we copy the data
1104  * directly from the user into the GTT, uncached.
1105  * @i915: i915 device private data
1106  * @obj: i915 gem object
1107  * @args: pwrite arguments structure
1108  * @file: drm file pointer
1109  */
1110 static int
1111 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
1112                          struct drm_i915_gem_object *obj,
1113                          struct drm_i915_gem_pwrite *args,
1114                          struct drm_file *file)
1115 {
1116         struct i915_ggtt *ggtt = &i915->ggtt;
1117         struct drm_device *dev = obj->base.dev;
1118         struct i915_vma *vma;
1119         struct drm_mm_node node;
1120         uint64_t remain, offset;
1121         char __user *user_data;
1122         int ret;
1123         bool hit_slow_path = false;
1124
1125         if (i915_gem_object_is_tiled(obj))
1126                 return -EFAULT;
1127
1128         intel_runtime_pm_get(i915);
1129         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1130                                        PIN_MAPPABLE | PIN_NONBLOCK);
1131         if (!IS_ERR(vma)) {
1132                 node.start = i915_ggtt_offset(vma);
1133                 node.allocated = false;
1134                 ret = i915_vma_put_fence(vma);
1135                 if (ret) {
1136                         i915_vma_unpin(vma);
1137                         vma = ERR_PTR(ret);
1138                 }
1139         }
1140         if (IS_ERR(vma)) {
1141                 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
1142                 if (ret)
1143                         goto out;
1144
1145                 ret = i915_gem_object_get_pages(obj);
1146                 if (ret) {
1147                         remove_mappable_node(&node);
1148                         goto out;
1149                 }
1150
1151                 i915_gem_object_pin_pages(obj);
1152         }
1153
1154         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1155         if (ret)
1156                 goto out_unpin;
1157
1158         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1159         obj->dirty = true;
1160
1161         user_data = u64_to_user_ptr(args->data_ptr);
1162         offset = args->offset;
1163         remain = args->size;
1164         while (remain) {
1165                 /* Operation in this page
1166                  *
1167                  * page_base = page offset within aperture
1168                  * page_offset = offset within page
1169                  * page_length = bytes to copy for this page
1170                  */
1171                 u32 page_base = node.start;
1172                 unsigned page_offset = offset_in_page(offset);
1173                 unsigned page_length = PAGE_SIZE - page_offset;
1174                 page_length = remain < page_length ? remain : page_length;
1175                 if (node.allocated) {
1176                         wmb(); /* flush the write before we modify the GGTT */
1177                         ggtt->base.insert_page(&ggtt->base,
1178                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1179                                                node.start, I915_CACHE_NONE, 0);
1180                         wmb(); /* flush modifications to the GGTT (insert_page) */
1181                 } else {
1182                         page_base += offset & PAGE_MASK;
1183                 }
1184                 /* If we get a fault while copying data, then (presumably) our
1185                  * source page isn't available.  Return the error and we'll
1186                  * retry in the slow path.
1187                  * If the object is non-shmem backed, we retry again with the
1188                  * path that handles page fault.
1189                  */
1190                 if (fast_user_write(&ggtt->mappable, page_base,
1191                                     page_offset, user_data, page_length)) {
1192                         hit_slow_path = true;
1193                         mutex_unlock(&dev->struct_mutex);
1194                         if (slow_user_access(&ggtt->mappable,
1195                                              page_base,
1196                                              page_offset, user_data,
1197                                              page_length, true)) {
1198                                 ret = -EFAULT;
1199                                 mutex_lock(&dev->struct_mutex);
1200                                 goto out_flush;
1201                         }
1202
1203                         mutex_lock(&dev->struct_mutex);
1204                 }
1205
1206                 remain -= page_length;
1207                 user_data += page_length;
1208                 offset += page_length;
1209         }
1210
1211 out_flush:
1212         if (hit_slow_path) {
1213                 if (ret == 0 &&
1214                     (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1215                         /* The user has modified the object whilst we tried
1216                          * reading from it, and we now have no idea what domain
1217                          * the pages should be in. As we have just been touching
1218                          * them directly, flush everything back to the GTT
1219                          * domain.
1220                          */
1221                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1222                 }
1223         }
1224
1225         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1226 out_unpin:
1227         if (node.allocated) {
1228                 wmb();
1229                 ggtt->base.clear_range(&ggtt->base,
1230                                        node.start, node.size);
1231                 i915_gem_object_unpin_pages(obj);
1232                 remove_mappable_node(&node);
1233         } else {
1234                 i915_vma_unpin(vma);
1235         }
1236 out:
1237         intel_runtime_pm_put(i915);
1238         return ret;
1239 }
1240
1241 /* Per-page copy function for the shmem pwrite fastpath.
1242  * Flushes invalid cachelines before writing to the target if
1243  * needs_clflush_before is set and flushes out any written cachelines after
1244  * writing if needs_clflush_after is set. */
1245 static int
1246 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1247                   char __user *user_data,
1248                   bool page_do_bit17_swizzling,
1249                   bool needs_clflush_before,
1250                   bool needs_clflush_after)
1251 {
1252         char *vaddr;
1253         int ret;
1254
1255         if (unlikely(page_do_bit17_swizzling))
1256                 return -EINVAL;
1257
1258         vaddr = kmap_atomic(page);
1259         if (needs_clflush_before)
1260                 drm_clflush_virt_range(vaddr + shmem_page_offset,
1261                                        page_length);
1262         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1263                                         user_data, page_length);
1264         if (needs_clflush_after)
1265                 drm_clflush_virt_range(vaddr + shmem_page_offset,
1266                                        page_length);
1267         kunmap_atomic(vaddr);
1268
1269         return ret ? -EFAULT : 0;
1270 }
1271
1272 /* Only difference to the fast-path function is that this can handle bit17
1273  * and uses non-atomic copy and kmap functions. */
1274 static int
1275 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1276                   char __user *user_data,
1277                   bool page_do_bit17_swizzling,
1278                   bool needs_clflush_before,
1279                   bool needs_clflush_after)
1280 {
1281         char *vaddr;
1282         int ret;
1283
1284         vaddr = kmap(page);
1285         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1286                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1287                                              page_length,
1288                                              page_do_bit17_swizzling);
1289         if (page_do_bit17_swizzling)
1290                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1291                                                 user_data,
1292                                                 page_length);
1293         else
1294                 ret = __copy_from_user(vaddr + shmem_page_offset,
1295                                        user_data,
1296                                        page_length);
1297         if (needs_clflush_after)
1298                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1299                                              page_length,
1300                                              page_do_bit17_swizzling);
1301         kunmap(page);
1302
1303         return ret ? -EFAULT : 0;
1304 }
1305
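/*
 * Pwrite via the object's shmemfs pages: per page, try the atomic fast path
 * and fall back to the slow path (dropping struct_mutex) when it faults or
 * needs bit-17 swizzle handling, clflushing around the copy as required by
 * the caller's cache domain.
 */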
1306 static int
1307 i915_gem_shmem_pwrite(struct drm_device *dev,
1308                       struct drm_i915_gem_object *obj,
1309                       struct drm_i915_gem_pwrite *args,
1310                       struct drm_file *file)
1311 {
1312         ssize_t remain;
1313         loff_t offset;
1314         char __user *user_data;
1315         int shmem_page_offset, page_length, ret = 0;
1316         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1317         int hit_slowpath = 0;
1318         unsigned int needs_clflush;
1319         struct sg_page_iter sg_iter;
1320
1321         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1322         if (ret)
1323                 return ret;
1324
1325         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1326         user_data = u64_to_user_ptr(args->data_ptr);
1327         offset = args->offset;
1328         remain = args->size;
1329
1330         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1331                          offset >> PAGE_SHIFT) {
1332                 struct page *page = sg_page_iter_page(&sg_iter);
1333                 int partial_cacheline_write;
1334
1335                 if (remain <= 0)
1336                         break;
1337
1338                 /* Operation in this page
1339                  *
1340                  * shmem_page_offset = offset within page in shmem file
1341                  * page_length = bytes to copy for this page
1342                  */
1343                 shmem_page_offset = offset_in_page(offset);
1344
1345                 page_length = remain;
1346                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1347                         page_length = PAGE_SIZE - shmem_page_offset;
1348
1349                 /* If we don't overwrite a cacheline completely we need to be
1350                  * careful to have up-to-date data by first clflushing. Don't
1351                  * overcomplicate things and flush the entire cacheline. */
1352                 partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
1353                         ((shmem_page_offset | page_length)
1354                                 & (boot_cpu_data.x86_clflush_size - 1));
1355
1356                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1357                         (page_to_phys(page) & (1 << 17)) != 0;
1358
1359                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1360                                         user_data, page_do_bit17_swizzling,
1361                                         partial_cacheline_write,
1362                                         needs_clflush & CLFLUSH_AFTER);
1363                 if (ret == 0)
1364                         goto next_page;
1365
1366                 hit_slowpath = 1;
1367                 mutex_unlock(&dev->struct_mutex);
1368                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1369                                         user_data, page_do_bit17_swizzling,
1370                                         partial_cacheline_write,
1371                                         needs_clflush & CLFLUSH_AFTER);
1372
1373                 mutex_lock(&dev->struct_mutex);
1374
1375                 if (ret)
1376                         goto out;
1377
1378 next_page:
1379                 remain -= page_length;
1380                 user_data += page_length;
1381                 offset += page_length;
1382         }
1383
1384 out:
1385         i915_gem_obj_finish_shmem_access(obj);
1386
1387         if (hit_slowpath) {
1388                 /*
1389                  * Fixup: Flush cpu caches in case we didn't flush the dirty
1390                  * cachelines in-line while writing and the object moved
1391                  * out of the cpu write domain while we had dropped the lock.
1392                  */
1393                 if (!(needs_clflush & CLFLUSH_AFTER) &&
1394                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1395                         if (i915_gem_clflush_object(obj, obj->pin_display))
1396                                 needs_clflush |= CLFLUSH_AFTER;
1397                 }
1398         }
1399
1400         if (needs_clflush & CLFLUSH_AFTER)
1401                 i915_gem_chipset_flush(to_i915(dev));
1402
1403         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1404         return ret;
1405 }
1406
1407 /**
1408  * i915_gem_pwrite_ioctl - write data to the object referenced by handle.
1409  * @dev: drm device
1410  * @data: ioctl data blob
1411  * @file: drm file
1412  *
1413  * On error, the contents of the buffer that were to be modified are undefined.
1414  */
1415 int
1416 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1417                       struct drm_file *file)
1418 {
1419         struct drm_i915_private *dev_priv = to_i915(dev);
1420         struct drm_i915_gem_pwrite *args = data;
1421         struct drm_i915_gem_object *obj;
1422         int ret;
1423
1424         if (args->size == 0)
1425                 return 0;
1426
1427         if (!access_ok(VERIFY_READ,
1428                        u64_to_user_ptr(args->data_ptr),
1429                        args->size))
1430                 return -EFAULT;
1431
1432         if (likely(!i915.prefault_disable)) {
1433                 ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
1434                                                    args->size);
1435                 if (ret)
1436                         return -EFAULT;
1437         }
1438
1439         obj = i915_gem_object_lookup(file, args->handle);
1440         if (!obj)
1441                 return -ENOENT;
1442
1443         /* Bounds check destination. */
1444         if (args->offset > obj->base.size ||
1445             args->size > obj->base.size - args->offset) {
1446                 ret = -EINVAL;
1447                 goto err;
1448         }
1449
1450         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1451
1452         ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
1453         if (ret)
1454                 goto err;
1455
1456         intel_runtime_pm_get(dev_priv);
1457
1458         ret = i915_mutex_lock_interruptible(dev);
1459         if (ret)
1460                 goto err_rpm;
1461
1462         ret = -EFAULT;
1463         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1464          * it would end up going through the fenced access, and we'll get
1465          * different detiling behavior between reading and writing.
1466          * pread/pwrite currently are reading and writing from the CPU
1467          * perspective, requiring manual detiling by the client.
1468          */
1469         if (!i915_gem_object_has_struct_page(obj) ||
1470             cpu_write_needs_clflush(obj))
1471                 /* Note that the gtt paths might fail with non-page-backed user
1472                  * pointers (e.g. gtt mappings when moving data between
1473                  * textures). Fall back to the shmem path in that case.
1474                  */
1475                 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1476
1477         if (ret == -EFAULT || ret == -ENOSPC) {
1478                 if (obj->phys_handle)
1479                         ret = i915_gem_phys_pwrite(obj, args, file);
1480                 else
1481                         ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1482         }
1483
1484         i915_gem_object_put(obj);
1485         mutex_unlock(&dev->struct_mutex);
1486         intel_runtime_pm_put(dev_priv);
1487
1488         return ret;
1489
1490 err_rpm:
1491         intel_runtime_pm_put(dev_priv);
1492 err:
1493         i915_gem_object_put_unlocked(obj);
1494         return ret;
1495 }
1496
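/*
 * Frontbuffer tracking origin for a write: GTT-domain writes are attributed
 * to whichever GTT mmap origin the object recorded, anything else to the CPU.
 */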
1497 static inline enum fb_op_origin
1498 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1499 {
1500         return (domain == I915_GEM_DOMAIN_GTT ?
1501                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1502 }
1503
1504 /**
1505  * Called when user space prepares to use an object with the CPU, either
1506  * through the mmap ioctl's mapping or a GTT mapping.
1507  * @dev: drm device
1508  * @data: ioctl data blob
1509  * @file: drm file
1510  */
1511 int
1512 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1513                           struct drm_file *file)
1514 {
1515         struct drm_i915_gem_set_domain *args = data;
1516         struct drm_i915_gem_object *obj;
1517         uint32_t read_domains = args->read_domains;
1518         uint32_t write_domain = args->write_domain;
1519         int ret;
1520
1521         /* Only handle setting domains to types used by the CPU. */
1522         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1523                 return -EINVAL;
1524
1525         /* Having something in the write domain implies it's in the read
1526          * domain, and only that read domain.  Enforce that in the request.
1527          */
1528         if (write_domain != 0 && read_domains != write_domain)
1529                 return -EINVAL;
1530
1531         obj = i915_gem_object_lookup(file, args->handle);
1532         if (!obj)
1533                 return -ENOENT;
1534
1535         /* Try to flush the object off the GPU without holding the lock.
1536          * We will repeat the flush holding the lock in the normal manner
1537          * to catch cases where we are gazumped.
1538          */
1539         ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
1540         if (ret)
1541                 goto err;
1542
1543         ret = i915_mutex_lock_interruptible(dev);
1544         if (ret)
1545                 goto err;
1546
1547         if (read_domains & I915_GEM_DOMAIN_GTT)
1548                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1549         else
1550                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1551
1552         if (write_domain != 0)
1553                 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1554
1555         i915_gem_object_put(obj);
1556         mutex_unlock(&dev->struct_mutex);
1557         return ret;
1558
1559 err:
1560         i915_gem_object_put_unlocked(obj);
1561         return ret;
1562 }
1563
1564 /**
1565  * Called when user space has done writes to this buffer
1566  * @dev: drm device
1567  * @data: ioctl data blob
1568  * @file: drm file
1569  */
1570 int
1571 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1572                          struct drm_file *file)
1573 {
1574         struct drm_i915_gem_sw_finish *args = data;
1575         struct drm_i915_gem_object *obj;
1576         int err = 0;
1577
1578         obj = i915_gem_object_lookup(file, args->handle);
1579         if (!obj)
1580                 return -ENOENT;
1581
1582         /* Pinned buffers may be scanout, so flush the cache */
1583         if (READ_ONCE(obj->pin_display)) {
1584                 err = i915_mutex_lock_interruptible(dev);
1585                 if (!err) {
1586                         i915_gem_object_flush_cpu_write_domain(obj);
1587                         mutex_unlock(&dev->struct_mutex);
1588                 }
1589         }
1590
1591         i915_gem_object_put_unlocked(obj);
1592         return err;
1593 }
1594
1595 /**
1596  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1597  *                       it is mapped to.
1598  * @dev: drm device
1599  * @data: ioctl data blob
1600  * @file: drm file
1601  *
1602  * While the mapping holds a reference on the contents of the object, it doesn't
1603  * imply a ref on the object itself.
1604  *
1605  * IMPORTANT:
1606  *
1607  * DRM driver writers who look at this function as an example for how to do GEM
1608  * mmap support, please don't implement mmap support like here. The modern way
1609  * to implement DRM mmap support is with an mmap offset ioctl (like
1610  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1611  * That way debug tooling like valgrind will understand what's going on; hiding
1612  * the mmap call in a driver private ioctl will break that. The i915 driver only
1613  * does cpu mmaps this way because we didn't know better.
1614  */
1615 int
1616 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1617                     struct drm_file *file)
1618 {
1619         struct drm_i915_gem_mmap *args = data;
1620         struct drm_i915_gem_object *obj;
1621         unsigned long addr;
1622
1623         if (args->flags & ~(I915_MMAP_WC))
1624                 return -EINVAL;
1625
1626         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1627                 return -ENODEV;
1628
1629         obj = i915_gem_object_lookup(file, args->handle);
1630         if (!obj)
1631                 return -ENOENT;
1632
1633         /* prime objects have no backing filp to GEM mmap
1634          * pages from.
1635          */
1636         if (!obj->base.filp) {
1637                 i915_gem_object_put_unlocked(obj);
1638                 return -EINVAL;
1639         }
1640
1641         addr = vm_mmap(obj->base.filp, 0, args->size,
1642                        PROT_READ | PROT_WRITE, MAP_SHARED,
1643                        args->offset);
1644         if (args->flags & I915_MMAP_WC) {
1645                 struct mm_struct *mm = current->mm;
1646                 struct vm_area_struct *vma;
1647
1648                 if (down_write_killable(&mm->mmap_sem)) {
1649                         i915_gem_object_put_unlocked(obj);
1650                         return -EINTR;
1651                 }
1652                 vma = find_vma(mm, addr);
1653                 if (vma)
1654                         vma->vm_page_prot =
1655                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1656                 else
1657                         addr = -ENOMEM;
1658                 up_write(&mm->mmap_sem);
1659
1660                 /* This may race, but that's ok, it only gets set */
1661                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1662         }
1663         i915_gem_object_put_unlocked(obj);
1664         if (IS_ERR((void *)addr))
1665                 return addr;
1666
1667         args->addr_ptr = (uint64_t) addr;
1668
1669         return 0;
1670 }
1671
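/* Number of pages spanned by a single tile row of the object: the stride in
 * bytes multiplied by the tile height (32 rows for Y-major tiling, 8 rows
 * otherwise), converted to pages.
 */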
1672 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1673 {
1674         u64 size;
1675
1676         size = i915_gem_object_get_stride(obj);
1677         size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1678
1679         return size >> PAGE_SHIFT;
1680 }
1681
1682 /**
1683  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1684  *
1685  * A history of the GTT mmap interface:
1686  *
1687  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1688  *     be aligned and suitable for fencing, and still fit into the available
1689  *     mappable space left by the pinned display objects. A classic problem
1690  *     we called the page-fault-of-doom where we would ping-pong between
1691  *     two objects that could not fit inside the GTT and so the memcpy
1692  *     would page one object in at the expense of the other between every
1693  *     single byte.
1694  *
1695  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1696  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1697  *     object is too large for the available space (or simply too large
1698  *     for the mappable aperture!), a view is created instead and faulted
1699  *     into userspace. (This view is aligned and sized appropriately for
1700  *     fenced access.)
1701  *
1702  * Restrictions:
1703  *
1704  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause machine
1705  *    hangs on some architectures, corruption on others. An attempt to service
1706  *    a GTT page fault from a snoopable object will generate a SIGBUS.
1707  *
1708  *  * the object must be able to fit into RAM (physical memory, though not
1709  *    limited to the mappable aperture).
1710  *
1711  *
1712  * Caveats:
1713  *
1714  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1715  *    all data to system memory. Subsequent access will not be synchronized.
1716  *
1717  *  * all mappings are revoked on runtime device suspend.
1718  *
1719  *  * there are only 8, 16 or 32 fence registers to share between all users
1720  *    (older machines require a fence register for display and blitter access
1721  *    as well). Contention of the fence registers will cause the previous users
1722  *    to be unmapped and any new access will generate new page faults.
1723  *
1724  *  * running out of memory while servicing a fault may generate a SIGBUS,
1725  *    rather than the expected SIGSEGV.
1726  */
1727 int i915_gem_mmap_gtt_version(void)
1728 {
1729         return 1;
1730 }
1731
1732 /**
1733  * i915_gem_fault - fault a page into the GTT
1734  * @area: CPU VMA in question
1735  * @vmf: fault info
1736  *
1737  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1738  * from userspace.  The fault handler takes care of binding the object to
1739  * the GTT (if needed), allocating and programming a fence register (again,
1740  * only if needed based on whether the old reg is still valid or the object
1741  * is tiled) and inserting a new PTE into the faulting process.
1742  *
1743  * Note that the faulting process may involve evicting existing objects
1744  * from the GTT and/or fence registers to make room.  So performance may
1745  * suffer if the GTT working set is large or there are few fence registers
1746  * left.
1747  *
1748  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1749  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1750  */
1751 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1752 {
1753 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1754         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1755         struct drm_device *dev = obj->base.dev;
1756         struct drm_i915_private *dev_priv = to_i915(dev);
1757         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1758         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1759         struct i915_vma *vma;
1760         pgoff_t page_offset;
1761         unsigned int flags;
1762         int ret;
1763
1764         /* We don't use vmf->pgoff since that has the fake offset */
1765         page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1766                 PAGE_SHIFT;
1767
1768         trace_i915_gem_object_fault(obj, page_offset, true, write);
1769
1770         /* Try to flush the object off the GPU first without holding the lock.
1771          * Upon acquiring the lock, we will perform our sanity checks and then
1772          * repeat the flush holding the lock in the normal manner to catch cases
1773          * where we are gazumped.
1774          */
1775         ret = __unsafe_wait_rendering(obj, NULL, !write);
1776         if (ret)
1777                 goto err;
1778
1779         intel_runtime_pm_get(dev_priv);
1780
1781         ret = i915_mutex_lock_interruptible(dev);
1782         if (ret)
1783                 goto err_rpm;
1784
1785         /* Access to snoopable pages through the GTT is incoherent. */
1786         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1787                 ret = -EFAULT;
1788                 goto err_unlock;
1789         }
1790
1791         /* If the object is smaller than a couple of partial vmas, it is
1792          * not worth creating only a single partial vma - we may as well
1793          * clear enough space for the full object.
1794          */
1795         flags = PIN_MAPPABLE;
1796         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1797                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1798
1799         /* Now pin it into the GTT as needed */
1800         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1801         if (IS_ERR(vma)) {
1802                 struct i915_ggtt_view view;
1803                 unsigned int chunk_size;
1804
1805                 /* Use a partial view if it is bigger than available space */
1806                 chunk_size = MIN_CHUNK_PAGES;
1807                 if (i915_gem_object_is_tiled(obj))
1808                         chunk_size = max(chunk_size, tile_row_pages(obj));
1809
1810                 memset(&view, 0, sizeof(view));
1811                 view.type = I915_GGTT_VIEW_PARTIAL;
1812                 view.params.partial.offset = rounddown(page_offset, chunk_size);
1813                 view.params.partial.size =
1814                         min_t(unsigned int, chunk_size,
1815                               vma_pages(area) - view.params.partial.offset);
1816
1817                 /* If the partial covers the entire object, just create a
1818                  * normal VMA.
1819                  */
1820                 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1821                         view.type = I915_GGTT_VIEW_NORMAL;
1822
1823                 /* Userspace is now writing through an untracked VMA, abandon
1824                  * all hope that the hardware is able to track future writes.
1825                  */
1826                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1827
1828                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1829         }
1830         if (IS_ERR(vma)) {
1831                 ret = PTR_ERR(vma);
1832                 goto err_unlock;
1833         }
1834
1835         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1836         if (ret)
1837                 goto err_unpin;
1838
1839         ret = i915_vma_get_fence(vma);
1840         if (ret)
1841                 goto err_unpin;
1842
1843         /* Mark as being mmapped into userspace for later revocation */
1844         assert_rpm_wakelock_held(dev_priv);
1845         if (list_empty(&obj->userfault_link))
1846                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1847
1848         /* Finally, remap it using the new GTT offset */
1849         ret = remap_io_mapping(area,
1850                                area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1851                                (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1852                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1853                                &ggtt->mappable);
1854
1855 err_unpin:
1856         __i915_vma_unpin(vma);
1857 err_unlock:
1858         mutex_unlock(&dev->struct_mutex);
1859 err_rpm:
1860         intel_runtime_pm_put(dev_priv);
1861 err:
1862         switch (ret) {
1863         case -EIO:
1864                 /*
1865                  * We eat errors when the gpu is terminally wedged to avoid
1866                  * userspace unduly crashing (gl has no provisions for mmaps to
1867                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1868                  * and so needs to be reported.
1869                  */
1870                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1871                         ret = VM_FAULT_SIGBUS;
1872                         break;
1873                 }
1874         case -EAGAIN:
1875                 /*
1876                  * EAGAIN means the gpu is hung and we'll wait for the error
1877                  * handler to reset everything when re-faulting in
1878                  * i915_mutex_lock_interruptible.
1879                  */
1880         case 0:
1881         case -ERESTARTSYS:
1882         case -EINTR:
1883         case -EBUSY:
1884                 /*
1885                  * EBUSY is ok: this just means that another thread
1886                  * already did the job.
1887                  */
1888                 ret = VM_FAULT_NOPAGE;
1889                 break;
1890         case -ENOMEM:
1891                 ret = VM_FAULT_OOM;
1892                 break;
1893         case -ENOSPC:
1894         case -EFAULT:
1895                 ret = VM_FAULT_SIGBUS;
1896                 break;
1897         default:
1898                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1899                 ret = VM_FAULT_SIGBUS;
1900                 break;
1901         }
1902         return ret;
1903 }
1904
1905 /**
1906  * i915_gem_release_mmap - remove physical page mappings
1907  * @obj: obj in question
1908  *
1909  * Preserve the reservation of the mmapping with the DRM core code, but
1910  * relinquish ownership of the pages back to the system.
1911  *
1912  * It is vital that we remove the page mapping if we have mapped a tiled
1913  * object through the GTT and then lose the fence register due to
1914  * resource pressure. Similarly if the object has been moved out of the
1915  * aperture, then pages mapped into userspace must be revoked. Removing the
1916  * mapping will then trigger a page fault on the next user access, allowing
1917  * fixup by i915_gem_fault().
1918  */
1919 void
1920 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1921 {
1922         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1923
1924         /* Serialisation between user GTT access and our code depends upon
1925          * revoking the CPU's PTE whilst the mutex is held. The next user
1926          * pagefault then has to wait until we release the mutex.
1927          *
1928          * Note that RPM complicates this somewhat by adding an additional
1929          * requirement that operations to the GGTT be made holding the RPM
1930          * wakeref.
1931          */
1932         lockdep_assert_held(&i915->drm.struct_mutex);
1933         intel_runtime_pm_get(i915);
1934
1935         if (list_empty(&obj->userfault_link))
1936                 goto out;
1937
1938         list_del_init(&obj->userfault_link);
1939         drm_vma_node_unmap(&obj->base.vma_node,
1940                            obj->base.dev->anon_inode->i_mapping);
1941
1942         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
1943          * memory transactions from userspace before we return. The TLB
1944          * flushing implied by the PTE change above *should* be
1945          * sufficient; an extra barrier here just provides us with a bit
1946          * of paranoid documentation about our requirement to serialise
1947          * memory writes before touching registers / GSM.
1948          */
1949         wmb();
1950
1951 out:
1952         intel_runtime_pm_put(i915);
1953 }
1954
1955 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
1956 {
1957         struct drm_i915_gem_object *obj, *on;
1958         int i;
1959
1960         /*
1961          * Only called during RPM suspend. All users of the userfault_list
1962          * must be holding an RPM wakeref to ensure that this cannot
1963          * run concurrently with them (and they use the struct_mutex for
1964          * protection between themselves).
1965          */
1966
1967         list_for_each_entry_safe(obj, on,
1968                                  &dev_priv->mm.userfault_list, userfault_link) {
1969                 list_del_init(&obj->userfault_link);
1970                 drm_vma_node_unmap(&obj->base.vma_node,
1971                                    obj->base.dev->anon_inode->i_mapping);
1972         }
1973
1974         /* The fences will be lost when the device powers down. If any were
1975          * in use by hardware (i.e. they are pinned), we should not be powering
1976          * down! All other fences will be reacquired by the user upon waking.
1977          */
1978         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1979                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1980
1981                 if (WARN_ON(reg->pin_count))
1982                         continue;
1983
1984                 if (!reg->vma)
1985                         continue;
1986
1987                 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
1988                 reg->dirty = true;
1989         }
1990 }
1991
1992 /**
1993  * i915_gem_get_ggtt_size - return required global GTT size for an object
1994  * @dev_priv: i915 device
1995  * @size: object size
1996  * @tiling_mode: tiling mode
1997  *
1998  * Return the required global GTT size for an object, taking into account
1999  * potential fence register mapping.
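 *
 * For example, on GEN3 a 1.5 MiB tiled object needs a 2 MiB fence region:
 * the region starts at the 1 MiB minimum and doubles until it covers the
 * object.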
2000  */
2001 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
2002                            u64 size, int tiling_mode)
2003 {
2004         u64 ggtt_size;
2005
2006         GEM_BUG_ON(size == 0);
2007
2008         if (INTEL_GEN(dev_priv) >= 4 ||
2009             tiling_mode == I915_TILING_NONE)
2010                 return size;
2011
2012         /* Previous chips need a power-of-two fence region when tiling */
2013         if (IS_GEN3(dev_priv))
2014                 ggtt_size = 1024*1024;
2015         else
2016                 ggtt_size = 512*1024;
2017
2018         while (ggtt_size < size)
2019                 ggtt_size <<= 1;
2020
2021         return ggtt_size;
2022 }
2023
2024 /**
2025  * i915_gem_get_ggtt_alignment - return required global GTT alignment
2026  * @dev_priv: i915 device
2027  * @size: object size
2028  * @tiling_mode: tiling mode
2029  * @fenced: is fenced alignment required or not
2030  *
2031  * Return the required global GTT alignment for an object, taking into account
2032  * potential fence register mapping.
2033  */
2034 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
2035                                 int tiling_mode, bool fenced)
2036 {
2037         GEM_BUG_ON(size == 0);
2038
2039         /*
2040          * Minimum alignment is 4k (GTT page size), but might be greater
2041          * if a fence register is needed for the object.
2042          */
2043         if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
2044             tiling_mode == I915_TILING_NONE)
2045                 return 4096;
2046
2047         /*
2048          * Previous chips need to be aligned to the size of the smallest
2049          * fence register that can contain the object.
2050          */
2051         return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
2052 }
2053
2054 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2055 {
2056         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2057         int err;
2058
2059         err = drm_gem_create_mmap_offset(&obj->base);
2060         if (!err)
2061                 return 0;
2062
2063         /* We can idle the GPU locklessly to flush stale objects, but in order
2064          * to claim that space for ourselves, we need to take the big
2065          * struct_mutex to free the requests+objects and allocate our slot.
2066          */
2067         err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2068         if (err)
2069                 return err;
2070
2071         err = i915_mutex_lock_interruptible(&dev_priv->drm);
2072         if (!err) {
2073                 i915_gem_retire_requests(dev_priv);
2074                 err = drm_gem_create_mmap_offset(&obj->base);
2075                 mutex_unlock(&dev_priv->drm.struct_mutex);
2076         }
2077
2078         return err;
2079 }
2080
2081 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2082 {
2083         drm_gem_free_mmap_offset(&obj->base);
2084 }
2085
2086 int
2087 i915_gem_mmap_gtt(struct drm_file *file,
2088                   struct drm_device *dev,
2089                   uint32_t handle,
2090                   uint64_t *offset)
2091 {
2092         struct drm_i915_gem_object *obj;
2093         int ret;
2094
2095         obj = i915_gem_object_lookup(file, handle);
2096         if (!obj)
2097                 return -ENOENT;
2098
2099         ret = i915_gem_object_create_mmap_offset(obj);
2100         if (ret == 0)
2101                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2102
2103         i915_gem_object_put_unlocked(obj);
2104         return ret;
2105 }
2106
2107 /**
2108  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2109  * @dev: DRM device
2110  * @data: GTT mapping ioctl data
2111  * @file: GEM object info
2112  *
2113  * Simply returns the fake offset to userspace so it can mmap it.
2114  * The mmap call will end up in drm_gem_mmap(), which will set things
2115  * up so we can get faults in the handler above.
2116  *
2117  * The fault handler will take care of binding the object into the GTT
2118  * (since it may have been evicted to make room for something), allocating
2119  * a fence register, and mapping the appropriate aperture address into
2120  * userspace.
2121  */
2122 int
2123 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2124                         struct drm_file *file)
2125 {
2126         struct drm_i915_gem_mmap_gtt *args = data;
2127
2128         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2129 }
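
/*
 * Illustrative userspace sketch (not part of the driver) of the offset-based
 * mmap flow that the ioctl above enables, using libdrm's drmIoctl(); error
 * handling and the surrounding setup are omitted:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */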
2130
2131 /* Immediately discard the backing storage */
2132 static void
2133 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2134 {
2135         i915_gem_object_free_mmap_offset(obj);
2136
2137         if (obj->base.filp == NULL)
2138                 return;
2139
2140         /* Our goal here is to return as much of the memory as
2141          * possible back to the system, as we are called from OOM.
2142          * To do this we must instruct the shmfs to drop all of its
2143          * backing pages, *now*.
2144          */
2145         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2146         obj->madv = __I915_MADV_PURGED;
2147 }
2148
2149 /* Try to discard unwanted pages */
2150 static void
2151 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2152 {
2153         struct address_space *mapping;
2154
2155         switch (obj->madv) {
2156         case I915_MADV_DONTNEED:
2157                 i915_gem_object_truncate(obj);
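                /* fall through */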
2158         case __I915_MADV_PURGED:
2159                 return;
2160         }
2161
2162         if (obj->base.filp == NULL)
2163                 return;
2164
2165         mapping = obj->base.filp->f_mapping;
2166         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2167 }
2168
2169 static void
2170 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2171 {
2172         struct sgt_iter sgt_iter;
2173         struct page *page;
2174         int ret;
2175
2176         BUG_ON(obj->madv == __I915_MADV_PURGED);
2177
2178         ret = i915_gem_object_set_to_cpu_domain(obj, true);
2179         if (WARN_ON(ret)) {
2180                 /* In the event of a disaster, abandon all caches and
2181                  * hope for the best.
2182                  */
2183                 i915_gem_clflush_object(obj, true);
2184                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2185         }
2186
2187         i915_gem_gtt_finish_object(obj);
2188
2189         if (i915_gem_object_needs_bit17_swizzle(obj))
2190                 i915_gem_object_save_bit_17_swizzle(obj);
2191
2192         if (obj->madv == I915_MADV_DONTNEED)
2193                 obj->dirty = 0;
2194
2195         for_each_sgt_page(page, sgt_iter, obj->pages) {
2196                 if (obj->dirty)
2197                         set_page_dirty(page);
2198
2199                 if (obj->madv == I915_MADV_WILLNEED)
2200                         mark_page_accessed(page);
2201
2202                 put_page(page);
2203         }
2204         obj->dirty = 0;
2205
2206         sg_free_table(obj->pages);
2207         kfree(obj->pages);
2208 }
2209
2210 int
2211 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2212 {
2213         const struct drm_i915_gem_object_ops *ops = obj->ops;
2214
2215         if (obj->pages == NULL)
2216                 return 0;
2217
2218         if (obj->pages_pin_count)
2219                 return -EBUSY;
2220
2221         GEM_BUG_ON(obj->bind_count);
2222
2223         /* ->put_pages might need to allocate memory for the bit17 swizzle
2224          * array, hence protect them from being reaped by removing them from gtt
2225          * lists early. */
2226         list_del(&obj->global_list);
2227
2228         if (obj->mapping) {
2229                 void *ptr;
2230
2231                 ptr = ptr_mask_bits(obj->mapping);
2232                 if (is_vmalloc_addr(ptr))
2233                         vunmap(ptr);
2234                 else
2235                         kunmap(kmap_to_page(ptr));
2236
2237                 obj->mapping = NULL;
2238         }
2239
2240         ops->put_pages(obj);
2241         obj->pages = NULL;
2242
2243         i915_gem_object_invalidate(obj);
2244
2245         return 0;
2246 }
2247
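/* Upper bound on the size of a single scatterlist segment when SWIOTLB is
 * built in, so that a segment never exceeds what the bounce buffer can map;
 * returns 0 when no such limit applies.
 */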
2248 static unsigned int swiotlb_max_size(void)
2249 {
2250 #if IS_ENABLED(CONFIG_SWIOTLB)
2251         return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
2252 #else
2253         return 0;
2254 #endif
2255 }
2256
2257 static int
2258 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2259 {
2260         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2261         int page_count, i;
2262         struct address_space *mapping;
2263         struct sg_table *st;
2264         struct scatterlist *sg;
2265         struct sgt_iter sgt_iter;
2266         struct page *page;
2267         unsigned long last_pfn = 0;     /* suppress gcc warning */
2268         unsigned int max_segment;
2269         int ret;
2270         gfp_t gfp;
2271
2272         /* Assert that the object is not currently in any GPU domain. As it
2273          * wasn't in the GTT, there shouldn't be any way it could have been in
2274          * a GPU cache
2275          */
2276         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2277         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2278
2279         max_segment = swiotlb_max_size();
2280         if (!max_segment)
2281                 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2282
2283         st = kmalloc(sizeof(*st), GFP_KERNEL);
2284         if (st == NULL)
2285                 return -ENOMEM;
2286
2287         page_count = obj->base.size / PAGE_SIZE;
2288         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2289                 kfree(st);
2290                 return -ENOMEM;
2291         }
2292
2293         /* Get the list of pages out of our struct file.  They'll be pinned
2294          * at this point until we release them.
2295          *
2296          * Fail silently without starting the shrinker
2297          */
2298         mapping = obj->base.filp->f_mapping;
2299         gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2300         gfp |= __GFP_NORETRY | __GFP_NOWARN;
2301         sg = st->sgl;
2302         st->nents = 0;
2303         for (i = 0; i < page_count; i++) {
2304                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2305                 if (IS_ERR(page)) {
2306                         i915_gem_shrink(dev_priv,
2307                                         page_count,
2308                                         I915_SHRINK_BOUND |
2309                                         I915_SHRINK_UNBOUND |
2310                                         I915_SHRINK_PURGEABLE);
2311                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2312                 }
2313                 if (IS_ERR(page)) {
2314                         /* We've tried hard to allocate the memory by reaping
2315                          * our own buffer, now let the real VM do its job and
2316                          * go down in flames if truly OOM.
2317                          */
2318                         page = shmem_read_mapping_page(mapping, i);
2319                         if (IS_ERR(page)) {
2320                                 ret = PTR_ERR(page);
2321                                 goto err_pages;
2322                         }
2323                 }
2324                 if (!i ||
2325                     sg->length >= max_segment ||
2326                     page_to_pfn(page) != last_pfn + 1) {
2327                         if (i)
2328                                 sg = sg_next(sg);
2329                         st->nents++;
2330                         sg_set_page(sg, page, PAGE_SIZE, 0);
2331                 } else {
2332                         sg->length += PAGE_SIZE;
2333                 }
2334                 last_pfn = page_to_pfn(page);
2335
2336                 /* Check that the i965g/gm workaround works. */
2337                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2338         }
2339         if (sg) /* loop terminated early; short sg table */
2340                 sg_mark_end(sg);
2341         obj->pages = st;
2342
2343         ret = i915_gem_gtt_prepare_object(obj);
2344         if (ret)
2345                 goto err_pages;
2346
2347         if (i915_gem_object_needs_bit17_swizzle(obj))
2348                 i915_gem_object_do_bit_17_swizzle(obj);
2349
2350         if (i915_gem_object_is_tiled(obj) &&
2351             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2352                 i915_gem_object_pin_pages(obj);
2353
2354         return 0;
2355
2356 err_pages:
2357         sg_mark_end(sg);
2358         for_each_sgt_page(page, sgt_iter, st)
2359                 put_page(page);
2360         sg_free_table(st);
2361         kfree(st);
2362
2363         /* shmemfs first checks if there is enough memory to allocate the page
2364          * and reports ENOSPC should there be insufficient, along with the usual
2365          * ENOMEM for a genuine allocation failure.
2366          *
2367          * We use ENOSPC in our driver to mean that we have run out of aperture
2368          * space and so want to translate the error from shmemfs back to our
2369          * usual understanding of ENOMEM.
2370          */
2371         if (ret == -ENOSPC)
2372                 ret = -ENOMEM;
2373
2374         return ret;
2375 }
2376
2377 /* Ensure that the associated pages are gathered from the backing storage
2378  * and pinned into our object. i915_gem_object_get_pages() may be called
2379  * multiple times before they are released by a single call to
2380  * i915_gem_object_put_pages() - once the pages are no longer referenced
2381  * either as a result of memory pressure (reaping pages under the shrinker)
2382  * or as the object is itself released.
2383  */
2384 int
2385 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2386 {
2387         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2388         const struct drm_i915_gem_object_ops *ops = obj->ops;
2389         int ret;
2390
2391         if (obj->pages)
2392                 return 0;
2393
2394         if (obj->madv != I915_MADV_WILLNEED) {
2395                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2396                 return -EFAULT;
2397         }
2398
2399         BUG_ON(obj->pages_pin_count);
2400
2401         ret = ops->get_pages(obj);
2402         if (ret)
2403                 return ret;
2404
2405         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2406
2407         obj->get_page.sg = obj->pages->sgl;
2408         obj->get_page.last = 0;
2409
2410         return 0;
2411 }
2412
2413 /* The 'mapping' part of i915_gem_object_pin_map() below */
2414 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2415                                  enum i915_map_type type)
2416 {
2417         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2418         struct sg_table *sgt = obj->pages;
2419         struct sgt_iter sgt_iter;
2420         struct page *page;
2421         struct page *stack_pages[32];
2422         struct page **pages = stack_pages;
2423         unsigned long i = 0;
2424         pgprot_t pgprot;
2425         void *addr;
2426
2427         /* A single page can always be kmapped */
2428         if (n_pages == 1 && type == I915_MAP_WB)
2429                 return kmap(sg_page(sgt->sgl));
2430
2431         if (n_pages > ARRAY_SIZE(stack_pages)) {
2432                 /* Too big for stack -- allocate temporary array instead */
2433                 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2434                 if (!pages)
2435                         return NULL;
2436         }
2437
2438         for_each_sgt_page(page, sgt_iter, sgt)
2439                 pages[i++] = page;
2440
2441         /* Check that we have the expected number of pages */
2442         GEM_BUG_ON(i != n_pages);
2443
2444         switch (type) {
2445         case I915_MAP_WB:
2446                 pgprot = PAGE_KERNEL;
2447                 break;
2448         case I915_MAP_WC:
2449                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2450                 break;
2451         }
2452         addr = vmap(pages, n_pages, 0, pgprot);
2453
2454         if (pages != stack_pages)
2455                 drm_free_large(pages);
2456
2457         return addr;
2458 }
2459
2460 /* get, pin, and map the pages of the object into kernel space */
2461 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2462                               enum i915_map_type type)
2463 {
2464         enum i915_map_type has_type;
2465         bool pinned;
2466         void *ptr;
2467         int ret;
2468
2469         lockdep_assert_held(&obj->base.dev->struct_mutex);
2470         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2471
2472         ret = i915_gem_object_get_pages(obj);
2473         if (ret)
2474                 return ERR_PTR(ret);
2475
2476         i915_gem_object_pin_pages(obj);
2477         pinned = obj->pages_pin_count > 1;
2478
2479         ptr = ptr_unpack_bits(obj->mapping, has_type);
2480         if (ptr && has_type != type) {
2481                 if (pinned) {
2482                         ret = -EBUSY;
2483                         goto err;
2484                 }
2485
2486                 if (is_vmalloc_addr(ptr))
2487                         vunmap(ptr);
2488                 else
2489                         kunmap(kmap_to_page(ptr));
2490
2491                 ptr = obj->mapping = NULL;
2492         }
2493
2494         if (!ptr) {
2495                 ptr = i915_gem_object_map(obj, type);
2496                 if (!ptr) {
2497                         ret = -ENOMEM;
2498                         goto err;
2499                 }
2500
2501                 obj->mapping = ptr_pack_bits(ptr, type);
2502         }
2503
2504         return ptr;
2505
2506 err:
2507         i915_gem_object_unpin_pages(obj);
2508         return ERR_PTR(ret);
2509 }
2510
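/* Retirement callback for the object's last GPU write: once that request has
 * completed we can flush the frontbuffer damage attributed to the command
 * streamer.
 */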
2511 static void
2512 i915_gem_object_retire__write(struct i915_gem_active *active,
2513                               struct drm_i915_gem_request *request)
2514 {
2515         struct drm_i915_gem_object *obj =
2516                 container_of(active, struct drm_i915_gem_object, last_write);
2517
2518         intel_fb_obj_flush(obj, true, ORIGIN_CS);
2519 }
2520
2521 static void
2522 i915_gem_object_retire__read(struct i915_gem_active *active,
2523                              struct drm_i915_gem_request *request)
2524 {
2525         int idx = request->engine->id;
2526         struct drm_i915_gem_object *obj =
2527                 container_of(active, struct drm_i915_gem_object, last_read[idx]);
2528
2529         GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
2530
2531         i915_gem_object_clear_active(obj, idx);
2532         if (i915_gem_object_is_active(obj))
2533                 return;
2534
2535         /* Bump our place on the bound list to keep it roughly in LRU order
2536          * so that we don't steal from recently used but inactive objects
2537          * (unless we are forced to, of course!)
2538          */
2539         if (obj->bind_count)
2540                 list_move_tail(&obj->global_list,
2541                                &request->i915->mm.bound_list);
2542
2543         i915_gem_object_put(obj);
2544 }
2545
2546 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2547 {
2548         unsigned long elapsed;
2549
2550         if (ctx->hang_stats.banned)
2551                 return true;
2552
2553         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2554         if (ctx->hang_stats.ban_period_seconds &&
2555             elapsed <= ctx->hang_stats.ban_period_seconds) {
2556                 DRM_DEBUG("context hanging too fast, banning!\n");
2557                 return true;
2558         }
2559
2560         return false;
2561 }
2562
2563 static void i915_set_reset_status(struct i915_gem_context *ctx,
2564                                   const bool guilty)
2565 {
2566         struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2567
2568         if (guilty) {
2569                 hs->banned = i915_context_is_banned(ctx);
2570                 hs->batch_active++;
2571                 hs->guilty_ts = get_seconds();
2572         } else {
2573                 hs->batch_pending++;
2574         }
2575 }
2576
2577 struct drm_i915_gem_request *
2578 i915_gem_find_active_request(struct intel_engine_cs *engine)
2579 {
2580         struct drm_i915_gem_request *request;
2581
2582         /* We are called by the error capture and reset at a random
2583          * point in time. In particular, note that neither is crucially
2584          * ordered with an interrupt. After a hang, the GPU is dead and we
2585          * assume that no more writes can happen (we waited long enough for
2586          * all writes that were in transaction to be flushed) - adding an
2587          * all writes that were in flight to be flushed) - adding an
2588          * not need an engine->irq_seqno_barrier() before the seqno reads.
2589          */
2590         list_for_each_entry(request, &engine->request_list, link) {
2591                 if (i915_gem_request_completed(request))
2592                         continue;
2593
2594                 if (!i915_sw_fence_done(&request->submit))
2595                         break;
2596
2597                 return request;
2598         }
2599
2600         return NULL;
2601 }
2602
2603 static void reset_request(struct drm_i915_gem_request *request)
2604 {
2605         void *vaddr = request->ring->vaddr;
2606         u32 head;
2607
2608         /* As this request likely depends on state from the lost
2609          * context, clear out all the user operations leaving the
2610          * breadcrumb at the end (so we get the fence notifications).
2611          */
2612         head = request->head;
2613         if (request->postfix < head) {
2614                 memset(vaddr + head, 0, request->ring->size - head);
2615                 head = 0;
2616         }
2617         memset(vaddr + head, 0, request->postfix - head);
2618 }
2619
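/* Per-engine reset bookkeeping: find the request that hung, record the blame
 * on its context, program the engine to restart from that request's
 * breadcrumb, and scrub any queued requests that depend on the now-corrupt
 * non-default context.
 */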
2620 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2621 {
2622         struct drm_i915_gem_request *request;
2623         struct i915_gem_context *incomplete_ctx;
2624         bool ring_hung;
2625
2626         if (engine->irq_seqno_barrier)
2627                 engine->irq_seqno_barrier(engine);
2628
2629         request = i915_gem_find_active_request(engine);
2630         if (!request)
2631                 return;
2632
2633         ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2634         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
2635                 ring_hung = false;
2636
2637         i915_set_reset_status(request->ctx, ring_hung);
2638         if (!ring_hung)
2639                 return;
2640
2641         DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2642                          engine->name, request->fence.seqno);
2643
2644         /* Setup the CS to resume from the breadcrumb of the hung request */
2645         engine->reset_hw(engine, request);
2646
2647         /* Users of the default context do not rely on logical state
2648          * preserved between batches. They have to emit full state on
2649          * every batch and so it is safe to execute queued requests following
2650          * the hang.
2651          *
2652          * Other contexts preserve state, now corrupt. We want to skip all
2653          * queued requests that reference the corrupt context.
2654          */
2655         incomplete_ctx = request->ctx;
2656         if (i915_gem_context_is_default(incomplete_ctx))
2657                 return;
2658
2659         list_for_each_entry_continue(request, &engine->request_list, link)
2660                 if (request->ctx == incomplete_ctx)
2661                         reset_request(request);
2662 }
2663
2664 void i915_gem_reset(struct drm_i915_private *dev_priv)
2665 {
2666         struct intel_engine_cs *engine;
2667         enum intel_engine_id id;
2668
2669         i915_gem_retire_requests(dev_priv);
2670
2671         for_each_engine(engine, dev_priv, id)
2672                 i915_gem_reset_engine(engine);
2673
2674         i915_gem_restore_fences(&dev_priv->drm);
2675
2676         if (dev_priv->gt.awake) {
2677                 intel_sanitize_gt_powersave(dev_priv);
2678                 intel_enable_gt_powersave(dev_priv);
2679                 if (INTEL_GEN(dev_priv) >= 6)
2680                         gen6_rps_busy(dev_priv);
2681         }
2682 }
2683
2684 static void nop_submit_request(struct drm_i915_gem_request *request)
2685 {
2686 }
2687
2688 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2689 {
2690         engine->submit_request = nop_submit_request;
2691
2692         /* Mark all pending requests as complete so that any concurrent
2693          * (lockless) lookup doesn't try and wait upon the request as we
2694          * reset it.
2695          */
2696         intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2697
2698         /*
2699          * Clear out the execlists queue before freeing the requests, as those
2700          * are the ones that keep the context and ringbuffer backing objects
2701          * pinned in place.
2702          */
2703
2704         if (i915.enable_execlists) {
2705                 spin_lock(&engine->execlist_lock);
2706                 INIT_LIST_HEAD(&engine->execlist_queue);
2707                 i915_gem_request_put(engine->execlist_port[0].request);
2708                 i915_gem_request_put(engine->execlist_port[1].request);
2709                 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2710                 spin_unlock(&engine->execlist_lock);
2711         }
2712
2713         engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2714 }
2715
2716 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2717 {
2718         struct intel_engine_cs *engine;
2719         enum intel_engine_id id;
2720
2721         lockdep_assert_held(&dev_priv->drm.struct_mutex);
2722         set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2723
2724         i915_gem_context_lost(dev_priv);
2725         for_each_engine(engine, dev_priv, id)
2726                 i915_gem_cleanup_engine(engine);
2727         mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2728
2729         i915_gem_retire_requests(dev_priv);
2730 }
2731
2732 static void
2733 i915_gem_retire_work_handler(struct work_struct *work)
2734 {
2735         struct drm_i915_private *dev_priv =
2736                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2737         struct drm_device *dev = &dev_priv->drm;
2738
2739         /* Come back later if the device is busy... */
2740         if (mutex_trylock(&dev->struct_mutex)) {
2741                 i915_gem_retire_requests(dev_priv);
2742                 mutex_unlock(&dev->struct_mutex);
2743         }
2744
2745         /* Keep the retire handler running until we are finally idle.
2746          * We do not need to do this test under locking as in the worst-case
2747          * we queue the retire worker once too often.
2748          */
2749         if (READ_ONCE(dev_priv->gt.awake)) {
2750                 i915_queue_hangcheck(dev_priv);
2751                 queue_delayed_work(dev_priv->wq,
2752                                    &dev_priv->gt.retire_work,
2753                                    round_jiffies_up_relative(HZ));
2754         }
2755 }
2756
2757 static void
2758 i915_gem_idle_work_handler(struct work_struct *work)
2759 {
2760         struct drm_i915_private *dev_priv =
2761                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2762         struct drm_device *dev = &dev_priv->drm;
2763         struct intel_engine_cs *engine;
2764         enum intel_engine_id id;
2765         bool rearm_hangcheck;
2766
2767         if (!READ_ONCE(dev_priv->gt.awake))
2768                 return;
2769
2770         if (READ_ONCE(dev_priv->gt.active_engines))
2771                 return;
2772
2773         rearm_hangcheck =
2774                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2775
2776         if (!mutex_trylock(&dev->struct_mutex)) {
2777                 /* Currently busy, come back later */
2778                 mod_delayed_work(dev_priv->wq,
2779                                  &dev_priv->gt.idle_work,
2780                                  msecs_to_jiffies(50));
2781                 goto out_rearm;
2782         }
2783
2784         if (dev_priv->gt.active_engines)
2785                 goto out_unlock;
2786
2787         for_each_engine(engine, dev_priv, id)
2788                 i915_gem_batch_pool_fini(&engine->batch_pool);
2789
2790         GEM_BUG_ON(!dev_priv->gt.awake);
2791         dev_priv->gt.awake = false;
2792         rearm_hangcheck = false;
2793
2794         if (INTEL_GEN(dev_priv) >= 6)
2795                 gen6_rps_idle(dev_priv);
2796         intel_runtime_pm_put(dev_priv);
2797 out_unlock:
2798         mutex_unlock(&dev->struct_mutex);
2799
2800 out_rearm:
2801         if (rearm_hangcheck) {
2802                 GEM_BUG_ON(!dev_priv->gt.awake);
2803                 i915_queue_hangcheck(dev_priv);
2804         }
2805 }
2806
2807 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2808 {
2809         struct drm_i915_gem_object *obj = to_intel_bo(gem);
2810         struct drm_i915_file_private *fpriv = file->driver_priv;
2811         struct i915_vma *vma, *vn;
2812
2813         mutex_lock(&obj->base.dev->struct_mutex);
2814         list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2815                 if (vma->vm->file == fpriv)
2816                         i915_vma_close(vma);
2817         mutex_unlock(&obj->base.dev->struct_mutex);
2818 }
2819
2820 /**
2821  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2822  * @dev: drm device pointer
2823  * @data: ioctl data blob
2824  * @file: drm file pointer
2825  *
2826  * Returns 0 if successful, else an error is returned with the remaining time in
2827  * the timeout parameter.
2828  *  -ETIME: object is still busy after timeout
2829  *  -ERESTARTSYS: signal interrupted the wait
2830  *  -ENOENT: object doesn't exist
2831  * Also possible, but rare:
2832  *  -EAGAIN: GPU wedged
2833  *  -ENOMEM: damn
2834  *  -ENODEV: Internal IRQ fail
2835  *  -E?: The add request failed
2836  *
2837  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2838  * non-zero timeout parameter the wait ioctl will wait for the given number of
2839  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2840  * without holding struct_mutex the object may become re-busied before this
2841  * function completes. A similar but shorter race condition exists in the busy
2842  * ioctl.
2843  */
2844 int
2845 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2846 {
2847         struct drm_i915_gem_wait *args = data;
2848         struct intel_rps_client *rps = to_rps_client(file);
2849         struct drm_i915_gem_object *obj;
2850         unsigned long active;
2851         int idx, ret = 0;
2852
2853         if (args->flags != 0)
2854                 return -EINVAL;
2855
2856         obj = i915_gem_object_lookup(file, args->bo_handle);
2857         if (!obj)
2858                 return -ENOENT;
2859
2860         active = __I915_BO_ACTIVE(obj);
2861         for_each_active(active, idx) {
2862                 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
2863                 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
2864                                                     I915_WAIT_INTERRUPTIBLE,
2865                                                     timeout, rps);
2866                 if (ret)
2867                         break;
2868         }
2869
2870         i915_gem_object_put_unlocked(obj);
2871         return ret;
2872 }
2873
2874 static void __i915_vma_iounmap(struct i915_vma *vma)
2875 {
2876         GEM_BUG_ON(i915_vma_is_pinned(vma));
2877
2878         if (vma->iomap == NULL)
2879                 return;
2880
2881         io_mapping_unmap(vma->iomap);
2882         vma->iomap = NULL;
2883 }
2884
2885 int i915_vma_unbind(struct i915_vma *vma)
2886 {
2887         struct drm_i915_gem_object *obj = vma->obj;
2888         unsigned long active;
2889         int ret;
2890
2891         /* First wait upon any activity as retiring the request may
2892          * have side-effects such as unpinning or even unbinding this vma.
2893          */
2894         active = i915_vma_get_active(vma);
2895         if (active) {
2896                 int idx;
2897
2898                 /* When a closed VMA is retired, it is unbound - eek.
2899                  * In order to prevent it from being recursively closed,
2900                  * take a pin on the vma so that the second unbind is
2901                  * aborted.
2902                  */
2903                 __i915_vma_pin(vma);
2904
2905                 for_each_active(active, idx) {
2906                         ret = i915_gem_active_retire(&vma->last_read[idx],
2907                                                    &vma->vm->dev->struct_mutex);
2908                         if (ret)
2909                                 break;
2910                 }
2911
2912                 __i915_vma_unpin(vma);
2913                 if (ret)
2914                         return ret;
2915
2916                 GEM_BUG_ON(i915_vma_is_active(vma));
2917         }
2918
2919         if (i915_vma_is_pinned(vma))
2920                 return -EBUSY;
2921
2922         if (!drm_mm_node_allocated(&vma->node))
2923                 goto destroy;
2924
2925         GEM_BUG_ON(obj->bind_count == 0);
2926         GEM_BUG_ON(!obj->pages);
2927
2928         if (i915_vma_is_map_and_fenceable(vma)) {
2929                 /* release the fence reg _after_ flushing */
2930                 ret = i915_vma_put_fence(vma);
2931                 if (ret)
2932                         return ret;
2933
2934                 /* Force a pagefault for domain tracking on next user access */
2935                 i915_gem_release_mmap(obj);
2936
2937                 __i915_vma_iounmap(vma);
2938                 vma->flags &= ~I915_VMA_CAN_FENCE;
2939         }
2940
2941         if (likely(!vma->vm->closed)) {
2942                 trace_i915_vma_unbind(vma);
2943                 vma->vm->unbind_vma(vma);
2944         }
2945         vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
2946
2947         drm_mm_remove_node(&vma->node);
2948         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2949
2950         if (vma->pages != obj->pages) {
2951                 GEM_BUG_ON(!vma->pages);
2952                 sg_free_table(vma->pages);
2953                 kfree(vma->pages);
2954         }
2955         vma->pages = NULL;
2956
2957         /* Since the unbound list is global, only move to that list if
2958          * no more VMAs exist. */
2959         if (--obj->bind_count == 0)
2960                 list_move_tail(&obj->global_list,
2961                                &to_i915(obj->base.dev)->mm.unbound_list);
2962
2963         /* And finally now the object is completely decoupled from this vma,
2964          * we can drop its hold on the backing storage and allow it to be
2965          * reaped by the shrinker.
2966          */
2967         i915_gem_object_unpin_pages(obj);
2968
2969 destroy:
2970         if (unlikely(i915_vma_is_closed(vma)))
2971                 i915_vma_destroy(vma);
2972
2973         return 0;
2974 }
2975
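/**
 * i915_gem_wait_for_idle - wait for all engines to become idle
 * @dev_priv: i915 device private
 * @flags: I915_WAIT_* flags passed on to intel_engine_idle()
 *
 * Engines that have never run a context are skipped. Returns 0 once all
 * engines are idle, or the first error reported by intel_engine_idle().
 */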
2976 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2977                            unsigned int flags)
2978 {
2979         struct intel_engine_cs *engine;
2980         enum intel_engine_id id;
2981         int ret;
2982
2983         for_each_engine(engine, dev_priv, id) {
2984                 if (engine->last_context == NULL)
2985                         continue;
2986
2987                 ret = intel_engine_idle(engine, flags);
2988                 if (ret)
2989                         return ret;
2990         }
2991
2992         return 0;
2993 }
2994
2995 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2996                                      unsigned long cache_level)
2997 {
2998         struct drm_mm_node *gtt_space = &vma->node;
2999         struct drm_mm_node *other;
3000
3001         /*
3002          * On some machines we have to be careful when putting differing types
3003          * of snoopable memory together to avoid the prefetcher crossing memory
3004          * domains and dying. During vm initialisation, we decide whether or not
3005          * these constraints apply and set the drm_mm.color_adjust
3006          * appropriately.
3007          */
3008         if (vma->vm->mm.color_adjust == NULL)
3009                 return true;
3010
3011         if (!drm_mm_node_allocated(gtt_space))
3012                 return true;
3013
3014         if (list_empty(&gtt_space->node_list))
3015                 return true;
3016
3017         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3018         if (other->allocated && !other->hole_follows && other->color != cache_level)
3019                 return false;
3020
3021         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3022         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3023                 return false;
3024
3025         return true;
3026 }
3027
3028 /**
3029  * i915_vma_insert - finds a slot for the vma in its address space
3030  * @vma: the vma
3031  * @size: requested size in bytes (can be larger than the VMA)
3032  * @alignment: required alignment
3033  * @flags: mask of PIN_* flags to use
3034  *
3035  * First we try to allocate some free space that meets the requirements for
3036  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
3037  * preferably the oldest idle entry, to make room for the new VMA.
3038  *
3039  * Returns:
3040  * 0 on success, negative error code otherwise.
3041  */
3042 static int
3043 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3044 {
3045         struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
3046         struct drm_i915_gem_object *obj = vma->obj;
3047         u64 start, end;
3048         int ret;
3049
3050         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
3051         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
3052
3053         size = max(size, vma->size);
3054         if (flags & PIN_MAPPABLE)
3055                 size = i915_gem_get_ggtt_size(dev_priv, size,
3056                                               i915_gem_object_get_tiling(obj));
3057
3058         alignment = max(max(alignment, vma->display_alignment),
3059                         i915_gem_get_ggtt_alignment(dev_priv, size,
3060                                                     i915_gem_object_get_tiling(obj),
3061                                                     flags & PIN_MAPPABLE));
3062
3063         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3064
3065         end = vma->vm->total;
3066         if (flags & PIN_MAPPABLE)
3067                 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3068         if (flags & PIN_ZONE_4G)
3069                 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3070
3071         /* If binding the object/GGTT view requires more space than the entire
3072          * aperture has, reject it early before evicting everything in a vain
3073          * attempt to find space.
3074          */
3075         if (size > end) {
3076                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3077                           size, obj->base.size,
3078                           flags & PIN_MAPPABLE ? "mappable" : "total",
3079                           end);
3080                 return -E2BIG;
3081         }
3082
3083         ret = i915_gem_object_get_pages(obj);
3084         if (ret)
3085                 return ret;
3086
3087         i915_gem_object_pin_pages(obj);
3088
3089         if (flags & PIN_OFFSET_FIXED) {
3090                 u64 offset = flags & PIN_OFFSET_MASK;
3091                 if (offset & (alignment - 1) || offset > end - size) {
3092                         ret = -EINVAL;
3093                         goto err_unpin;
3094                 }
3095
3096                 vma->node.start = offset;
3097                 vma->node.size = size;
3098                 vma->node.color = obj->cache_level;
3099                 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3100                 if (ret) {
3101                         ret = i915_gem_evict_for_vma(vma);
3102                         if (ret == 0)
3103                                 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3104                         if (ret)
3105                                 goto err_unpin;
3106                 }
3107         } else {
3108                 u32 search_flag, alloc_flag;
3109
3110                 if (flags & PIN_HIGH) {
3111                         search_flag = DRM_MM_SEARCH_BELOW;
3112                         alloc_flag = DRM_MM_CREATE_TOP;
3113                 } else {
3114                         search_flag = DRM_MM_SEARCH_DEFAULT;
3115                         alloc_flag = DRM_MM_CREATE_DEFAULT;
3116                 }
3117
3118                 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3119                  * so we know that we always have a minimum alignment of 4096.
3120                  * The drm_mm range manager is optimised to return results
3121                  * with zero alignment, so where possible use the optimal
3122                  * path.
3123                  */
3124                 if (alignment <= 4096)
3125                         alignment = 0;
3126
3127 search_free:
3128                 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3129                                                           &vma->node,
3130                                                           size, alignment,
3131                                                           obj->cache_level,
3132                                                           start, end,
3133                                                           search_flag,
3134                                                           alloc_flag);
3135                 if (ret) {
3136                         ret = i915_gem_evict_something(vma->vm, size, alignment,
3137                                                        obj->cache_level,
3138                                                        start, end,
3139                                                        flags);
3140                         if (ret == 0)
3141                                 goto search_free;
3142
3143                         goto err_unpin;
3144                 }
3145
3146                 GEM_BUG_ON(vma->node.start < start);
3147                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
3148         }
3149         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3150
3151         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3152         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3153         obj->bind_count++;
3154
3155         return 0;
3156
3157 err_unpin:
3158         i915_gem_object_unpin_pages(obj);
3159         return ret;
3160 }
3161
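/**
 * i915_gem_clflush_object - flush the CPU cache for the object's pages
 * @obj: object to flush
 * @force: flush even if the cache domain is believed to be coherent
 *
 * Returns true if a clflush was actually performed, and false if it could
 * be skipped (no pages, stolen/phys backing, or coherent caching without
 * @force).
 */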
3162 bool
3163 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3164                         bool force)
3165 {
3166         /* If we don't have a page list set up, then we're not pinned
3167          * to GPU, and we can ignore the cache flush because it'll happen
3168          * again at bind time.
3169          */
3170         if (obj->pages == NULL)
3171                 return false;
3172
3173         /*
3174          * Stolen memory is always coherent with the GPU as it is explicitly
3175          * marked as wc by the system, or the system is cache-coherent.
3176          */
3177         if (obj->stolen || obj->phys_handle)
3178                 return false;
3179
3180         /* If the GPU is snooping the contents of the CPU cache,
3181          * we do not need to manually clear the CPU cache lines.  However,
3182          * the caches are only snooped when the render cache is
3183          * flushed/invalidated.  As we always have to emit invalidations
3184          * and flushes when moving into and out of the RENDER domain, correct
3185          * snooping behaviour occurs naturally as the result of our domain
3186          * tracking.
3187          */
3188         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3189                 obj->cache_dirty = true;
3190                 return false;
3191         }
3192
3193         trace_i915_gem_object_clflush(obj);
3194         drm_clflush_sg(obj->pages);
3195         obj->cache_dirty = false;
3196
3197         return true;
3198 }
3199
3200 /** Flushes the GTT write domain for the object if it's dirty. */
3201 static void
3202 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3203 {
3204         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3205
3206         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3207                 return;
3208
3209         /* No actual flushing is required for the GTT write domain.  Writes
3210          * to it "immediately" go to main memory as far as we know, so there's
3211          * no chipset flush.  It also doesn't land in render cache.
3212          *
3213          * However, we do have to enforce the order so that all writes through
3214          * the GTT land before any writes to the device, such as updates to
3215          * the GATT itself.
3216          *
3217          * We also have to wait a bit for the writes to land from the GTT.
3218          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3219          * timing. This issue has only been observed when switching quickly
3220          * between GTT writes and CPU reads from inside the kernel on recent hw,
3221          * and it appears to only affect discrete GTT blocks (i.e. on LLC
3222          * system agents we cannot reproduce this behaviour).
3223          */
3224         wmb();
3225         if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3226                 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3227
3228         intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3229
3230         obj->base.write_domain = 0;
3231         trace_i915_gem_object_change_domain(obj,
3232                                             obj->base.read_domains,
3233                                             I915_GEM_DOMAIN_GTT);
3234 }
3235
3236 /** Flushes the CPU write domain for the object if it's dirty. */
3237 static void
3238 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3239 {
3240         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3241                 return;
3242
3243         if (i915_gem_clflush_object(obj, obj->pin_display))
3244                 i915_gem_chipset_flush(to_i915(obj->base.dev));
3245
3246         intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3247
3248         obj->base.write_domain = 0;
3249         trace_i915_gem_object_change_domain(obj,
3250                                             obj->base.read_domains,
3251                                             I915_GEM_DOMAIN_CPU);
3252 }
3253
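/* Move every idle, bound GGTT vma of this object to the tail of its
 * inactive list, bumping it in the eviction LRU.
 */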
3254 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
3255 {
3256         struct i915_vma *vma;
3257
3258         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3259                 if (!i915_vma_is_ggtt(vma))
3260                         continue;
3261
3262                 if (i915_vma_is_active(vma))
3263                         continue;
3264
3265                 if (!drm_mm_node_allocated(&vma->node))
3266                         continue;
3267
3268                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3269         }
3270 }
3271
3272 /**
3273  * Moves a single object to the GTT read, and possibly write domain.
3274  * @obj: object to act on
3275  * @write: ask for write access or read only
3276  *
3277  * This function returns when the move is complete, including waiting on
3278  * flushes to occur.
3279  */
3280 int
3281 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3282 {
3283         uint32_t old_write_domain, old_read_domains;
3284         int ret;
3285
3286         ret = i915_gem_object_wait_rendering(obj, !write);
3287         if (ret)
3288                 return ret;
3289
3290         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3291                 return 0;
3292
3293         /* Flush and acquire obj->pages so that we are coherent through
3294          * direct access in memory with previous cached writes through
3295          * shmemfs and that our cache domain tracking remains valid.
3296          * For example, if the obj->filp was moved to swap without us
3297          * being notified and releasing the pages, we would mistakenly
3298          * continue to assume that the obj remained out of the CPU cached
3299          * domain.
3300          */
3301         ret = i915_gem_object_get_pages(obj);
3302         if (ret)
3303                 return ret;
3304
3305         i915_gem_object_flush_cpu_write_domain(obj);
3306
3307         /* Serialise direct access to this object with the barriers for
3308          * coherent writes from the GPU, by effectively invalidating the
3309          * GTT domain upon first access.
3310          */
3311         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3312                 mb();
3313
3314         old_write_domain = obj->base.write_domain;
3315         old_read_domains = obj->base.read_domains;
3316
3317         /* It should now be out of any other write domains, and we can update
3318          * the domain values for our changes.
3319          */
3320         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3321         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3322         if (write) {
3323                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3324                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3325                 obj->dirty = 1;
3326         }
3327
3328         trace_i915_gem_object_change_domain(obj,
3329                                             old_read_domains,
3330                                             old_write_domain);
3331
3332         /* And bump the LRU for this access */
3333         i915_gem_object_bump_inactive_ggtt(obj);
3334
3335         return 0;
3336 }
3337
3338 /**
3339  * Changes the cache-level of an object across all VMA.
3340  * @obj: object to act on
3341  * @cache_level: new cache level to set for the object
3342  *
3343  * After this function returns, the object will be in the new cache-level
3344  * across all GTT and the contents of the backing storage will be coherent,
3345  * with respect to the new cache-level. In order to keep the backing storage
3346  * coherent for all users, we only allow a single cache level to be set
3347  * globally on the object and prevent it from being changed whilst the
3348  * hardware is reading from the object. That is, if the object is currently
3349  * on the scanout it will be set to uncached (or equivalent display
3350  * cache coherency) and all non-MOCS GPU access will also be uncached so
3351  * that all direct access to the scanout remains coherent.
3352  */
3353 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3354                                     enum i915_cache_level cache_level)
3355 {
3356         struct i915_vma *vma;
3357         int ret = 0;
3358
3359         if (obj->cache_level == cache_level)
3360                 goto out;
3361
3362         /* Inspect the list of currently bound VMA and unbind any that would
3363          * be invalid given the new cache-level. This is principally to
3364          * catch the issue of the CS prefetch crossing page boundaries and
3365          * reading an invalid PTE on older architectures.
3366          */
3367 restart:
3368         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3369                 if (!drm_mm_node_allocated(&vma->node))
3370                         continue;
3371
3372                 if (i915_vma_is_pinned(vma)) {
3373                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3374                         return -EBUSY;
3375                 }
3376
3377                 if (i915_gem_valid_gtt_space(vma, cache_level))
3378                         continue;
3379
3380                 ret = i915_vma_unbind(vma);
3381                 if (ret)
3382                         return ret;
3383
3384                 /* As unbinding may affect other elements in the
3385                  * obj->vma_list (due to side-effects from retiring
3386                  * an active vma), play safe and restart the iterator.
3387                  */
3388                 goto restart;
3389         }
3390
3391         /* We can reuse the existing drm_mm nodes but need to change the
3392          * cache-level on the PTE. We could simply unbind them all and
3393          * rebind with the correct cache-level on next use. However since
3394          * we already have a valid slot, dma mapping, pages etc, we may as well
3395          * rewrite the PTE in the belief that doing so tramples upon less
3396          * state and so involves less work.
3397          */
3398         if (obj->bind_count) {
3399                 /* Before we change the PTE, the GPU must not be accessing it.
3400                  * If we wait upon the object, we know that all the bound
3401                  * VMA are no longer active.
3402                  */
3403                 ret = i915_gem_object_wait_rendering(obj, false);
3404                 if (ret)
3405                         return ret;
3406
3407                 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3408                         /* Access to snoopable pages through the GTT is
3409                          * incoherent and on some machines causes a hard
3410          * lockup. Relinquish the CPU mmapping to force
3411                          * userspace to refault in the pages and we can
3412                          * then double check if the GTT mapping is still
3413                          * valid for that pointer access.
3414                          */
3415                         i915_gem_release_mmap(obj);
3416
3417                         /* As we no longer need a fence for GTT access,
3418                          * we can relinquish it now (and so prevent having
3419                          * to steal a fence from someone else on the next
3420                          * fence request). Note GPU activity would have
3421                          * dropped the fence as all snoopable access is
3422                          * supposed to be linear.
3423                          */
3424                         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3425                                 ret = i915_vma_put_fence(vma);
3426                                 if (ret)
3427                                         return ret;
3428                         }
3429                 } else {
3430                         /* We either have incoherent backing store and
3431                          * so no GTT access or the architecture is fully
3432                          * coherent. In such cases, existing GTT mmaps
3433                          * ignore the cache bit in the PTE and we can
3434                          * rewrite it without confusing the GPU or having
3435                          * to force userspace to fault back in its mmaps.
3436                          */
3437                 }
3438
3439                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3440                         if (!drm_mm_node_allocated(&vma->node))
3441                                 continue;
3442
3443                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3444                         if (ret)
3445                                 return ret;
3446                 }
3447         }
3448
3449         list_for_each_entry(vma, &obj->vma_list, obj_link)
3450                 vma->node.color = cache_level;
3451         obj->cache_level = cache_level;
3452
3453 out:
3454         /* Flush the dirty CPU caches to the backing storage so that the
3455          * object is now coherent at its new cache level (with respect
3456          * to the access domain).
3457          */
3458         if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3459                 if (i915_gem_clflush_object(obj, true))
3460                         i915_gem_chipset_flush(to_i915(obj->base.dev));
3461         }
3462
3463         return 0;
3464 }
3465
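/* Report the current caching mode of an object to userspace, translating
 * the internal cache level into the I915_CACHING_* uAPI values.
 */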
3466 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3467                                struct drm_file *file)
3468 {
3469         struct drm_i915_gem_caching *args = data;
3470         struct drm_i915_gem_object *obj;
3471
3472         obj = i915_gem_object_lookup(file, args->handle);
3473         if (!obj)
3474                 return -ENOENT;
3475
3476         switch (obj->cache_level) {
3477         case I915_CACHE_LLC:
3478         case I915_CACHE_L3_LLC:
3479                 args->caching = I915_CACHING_CACHED;
3480                 break;
3481
3482         case I915_CACHE_WT:
3483                 args->caching = I915_CACHING_DISPLAY;
3484                 break;
3485
3486         default:
3487                 args->caching = I915_CACHING_NONE;
3488                 break;
3489         }
3490
3491         i915_gem_object_put_unlocked(obj);
3492         return 0;
3493 }
3494
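/* Change the caching mode of an object on behalf of userspace. The
 * requested I915_CACHING_* value is translated into a cache level and
 * applied via i915_gem_object_set_cache_level() under struct_mutex.
 */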
3495 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3496                                struct drm_file *file)
3497 {
3498         struct drm_i915_private *i915 = to_i915(dev);
3499         struct drm_i915_gem_caching *args = data;
3500         struct drm_i915_gem_object *obj;
3501         enum i915_cache_level level;
3502         int ret;
3503
3504         switch (args->caching) {
3505         case I915_CACHING_NONE:
3506                 level = I915_CACHE_NONE;
3507                 break;
3508         case I915_CACHING_CACHED:
3509                 /*
3510                  * Due to a HW issue on BXT A stepping, GPU stores via a
3511                  * snooped mapping may leave stale data in a corresponding CPU
3512                  * cacheline, whereas normally such cachelines would get
3513                  * invalidated.
3514                  */
3515                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3516                         return -ENODEV;
3517
3518                 level = I915_CACHE_LLC;
3519                 break;
3520         case I915_CACHING_DISPLAY:
3521                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3522                 break;
3523         default:
3524                 return -EINVAL;
3525         }
3526
3527         ret = i915_mutex_lock_interruptible(dev);
3528         if (ret)
3529                 return ret;
3530
3531         obj = i915_gem_object_lookup(file, args->handle);
3532         if (!obj) {
3533                 ret = -ENOENT;
3534                 goto unlock;
3535         }
3536
3537         ret = i915_gem_object_set_cache_level(obj, level);
3538         i915_gem_object_put(obj);
3539 unlock:
3540         mutex_unlock(&dev->struct_mutex);
3541         return ret;
3542 }
3543
3544 /*
3545  * Prepare buffer for display plane (scanout, cursors, etc).
3546  * Can be called from an uninterruptible phase (modesetting) and allows
3547  * any flushes to be pipelined (for pageflips).
3548  */
3549 struct i915_vma *
3550 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3551                                      u32 alignment,
3552                                      const struct i915_ggtt_view *view)
3553 {
3554         struct i915_vma *vma;
3555         u32 old_read_domains, old_write_domain;
3556         int ret;
3557
3558         /* Mark the pin_display early so that we account for the
3559          * display coherency whilst setting up the cache domains.
3560          */
3561         obj->pin_display++;
3562
3563         /* The display engine is not coherent with the LLC cache on gen6.  As
3564          * a result, we make sure that the pinning that is about to occur is
3565          * done with uncached PTEs. This is lowest common denominator for all
3566          * chipsets.
3567          *
3568          * However for gen6+, we could do better by using the GFDT bit instead
3569          * of uncaching, which would allow us to flush all the LLC-cached data
3570          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3571          */
3572         ret = i915_gem_object_set_cache_level(obj,
3573                                               HAS_WT(to_i915(obj->base.dev)) ?
3574                                               I915_CACHE_WT : I915_CACHE_NONE);
3575         if (ret) {
3576                 vma = ERR_PTR(ret);
3577                 goto err_unpin_display;
3578         }
3579
3580         /* As the user may map the buffer once pinned in the display plane
3581          * (e.g. libkms for the bootup splash), we have to ensure that we
3582          * always use map_and_fenceable for all scanout buffers. However,
3583          * it may simply be too big to fit into mappable, in which case
3584          * put it anyway and hope that userspace can cope (but always first
3585          * try to preserve the existing ABI).
3586          */
3587         vma = ERR_PTR(-ENOSPC);
3588         if (view->type == I915_GGTT_VIEW_NORMAL)
3589                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3590                                                PIN_MAPPABLE | PIN_NONBLOCK);
3591         if (IS_ERR(vma))
3592                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
3593         if (IS_ERR(vma))
3594                 goto err_unpin_display;
3595
3596         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3597
3598         WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
3599
3600         i915_gem_object_flush_cpu_write_domain(obj);
3601
3602         old_write_domain = obj->base.write_domain;
3603         old_read_domains = obj->base.read_domains;
3604
3605         /* It should now be out of any other write domains, and we can update
3606          * the domain values for our changes.
3607          */
3608         obj->base.write_domain = 0;
3609         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3610
3611         trace_i915_gem_object_change_domain(obj,
3612                                             old_read_domains,
3613                                             old_write_domain);
3614
3615         return vma;
3616
3617 err_unpin_display:
3618         obj->pin_display--;
3619         return vma;
3620 }
3621
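/* Undo a pin taken by i915_gem_object_pin_to_display_plane(), dropping
 * the object's display pin count and releasing the vma pin.
 */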
3622 void
3623 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3624 {
3625         if (WARN_ON(vma->obj->pin_display == 0))
3626                 return;
3627
3628         if (--vma->obj->pin_display == 0)
3629                 vma->display_alignment = 0;
3630
3631         /* Bump the LRU to try and avoid premature eviction whilst flipping  */
3632         if (!i915_vma_is_active(vma))
3633                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3634
3635         i915_vma_unpin(vma);
3636         WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
3637 }
3638
3639 /**
3640  * Moves a single object to the CPU read, and possibly write domain.
3641  * @obj: object to act on
3642  * @write: requesting write or read-only access
3643  *
3644  * This function returns when the move is complete, including waiting on
3645  * flushes to occur.
3646  */
3647 int
3648 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3649 {
3650         uint32_t old_write_domain, old_read_domains;
3651         int ret;
3652
3653         ret = i915_gem_object_wait_rendering(obj, !write);
3654         if (ret)
3655                 return ret;
3656
3657         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3658                 return 0;
3659
3660         i915_gem_object_flush_gtt_write_domain(obj);
3661
3662         old_write_domain = obj->base.write_domain;
3663         old_read_domains = obj->base.read_domains;
3664
3665         /* Flush the CPU cache if it's still invalid. */
3666         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3667                 i915_gem_clflush_object(obj, false);
3668
3669                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3670         }
3671
3672         /* It should now be out of any other write domains, and we can update
3673          * the domain values for our changes.
3674          */
3675         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3676
3677         /* If we're writing through the CPU, then the GPU read domains will
3678          * need to be invalidated at next use.
3679          */
3680         if (write) {
3681                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3682                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3683         }
3684
3685         trace_i915_gem_object_change_domain(obj,
3686                                             old_read_domains,
3687                                             old_write_domain);
3688
3689         return 0;
3690 }
3691
3692 /* Throttle our rendering by waiting until the ring has completed our requests
3693  * emitted over 20 msec ago.
3694  *
3695  * Note that if we were to use the current jiffies each time around the loop,
3696  * we wouldn't escape the function with any frames outstanding if the time to
3697  * render a frame was over 20ms.
3698  *
3699  * This should get us reasonable parallelism between CPU and GPU but also
3700  * relatively low latency when blocking on a particular request to finish.
3701  */
3702 static int
3703 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3704 {
3705         struct drm_i915_private *dev_priv = to_i915(dev);
3706         struct drm_i915_file_private *file_priv = file->driver_priv;
3707         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3708         struct drm_i915_gem_request *request, *target = NULL;
3709         int ret;
3710
3711         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3712         if (ret)
3713                 return ret;
3714
3715         /* ABI: return -EIO if already wedged */
3716         if (i915_terminally_wedged(&dev_priv->gpu_error))
3717                 return -EIO;
3718
3719         spin_lock(&file_priv->mm.lock);
3720         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3721                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3722                         break;
3723
3724                 /*
3725                  * Note that the request might not have been submitted yet,
3726                  * in which case emitted_jiffies will be zero.
3727                  */
3728                 if (!request->emitted_jiffies)
3729                         continue;
3730
3731                 target = request;
3732         }
3733         if (target)
3734                 i915_gem_request_get(target);
3735         spin_unlock(&file_priv->mm.lock);
3736
3737         if (target == NULL)
3738                 return 0;
3739
3740         ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
3741         i915_gem_request_put(target);
3742
3743         return ret;
3744 }
3745
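/* Check whether a bound vma no longer satisfies the requested size,
 * alignment or PIN_* placement constraints and so must be rebound.
 */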
3746 static bool
3747 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3748 {
3749         if (!drm_mm_node_allocated(&vma->node))
3750                 return false;
3751
3752         if (vma->node.size < size)
3753                 return true;
3754
3755         if (alignment && vma->node.start & (alignment - 1))
3756                 return true;
3757
3758         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
3759                 return true;
3760
3761         if (flags & PIN_OFFSET_BIAS &&
3762             vma->node.start < (flags & PIN_OFFSET_MASK))
3763                 return true;
3764
3765         if (flags & PIN_OFFSET_FIXED &&
3766             vma->node.start != (flags & PIN_OFFSET_MASK))
3767                 return true;
3768
3769         return false;
3770 }
3771
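/* Recompute whether the vma can be used through the mappable aperture
 * with a fence: it must cover the full fence size at fence alignment and
 * lie entirely below ggtt.mappable_end.
 */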
3772 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3773 {
3774         struct drm_i915_gem_object *obj = vma->obj;
3775         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3776         bool mappable, fenceable;
3777         u32 fence_size, fence_alignment;
3778
3779         fence_size = i915_gem_get_ggtt_size(dev_priv,
3780                                             vma->size,
3781                                             i915_gem_object_get_tiling(obj));
3782         fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3783                                                       vma->size,
3784                                                       i915_gem_object_get_tiling(obj),
3785                                                       true);
3786
3787         fenceable = (vma->node.size == fence_size &&
3788                      (vma->node.start & (fence_alignment - 1)) == 0);
3789
3790         mappable = (vma->node.start + fence_size <=
3791                     dev_priv->ggtt.mappable_end);
3792
3793         if (mappable && fenceable)
3794                 vma->flags |= I915_VMA_CAN_FENCE;
3795         else
3796                 vma->flags &= ~I915_VMA_CAN_FENCE;
3797 }
3798
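/* Slow path of pinning a vma: insert the vma into its address space if it
 * has no binding yet, then (re)bind it with the requested flags and update
 * its map-and-fenceable status. On error the pin reference taken by the
 * caller is dropped again.
 */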
3799 int __i915_vma_do_pin(struct i915_vma *vma,
3800                       u64 size, u64 alignment, u64 flags)
3801 {
3802         unsigned int bound = vma->flags;
3803         int ret;
3804
3805         GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
3806         GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
3807
3808         if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3809                 ret = -EBUSY;
3810                 goto err;
3811         }
3812
3813         if ((bound & I915_VMA_BIND_MASK) == 0) {
3814                 ret = i915_vma_insert(vma, size, alignment, flags);
3815                 if (ret)
3816                         goto err;
3817         }
3818
3819         ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
3820         if (ret)
3821                 goto err;
3822
3823         if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
3824                 __i915_vma_set_map_and_fenceable(vma);
3825
3826         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3827         return 0;
3828
3829 err:
3830         __i915_vma_unpin(vma);
3831         return ret;
3832 }
3833
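/**
 * i915_gem_object_ggtt_pin - pin an object into the global GTT
 * @obj: object to pin
 * @view: the GGTT view to use
 * @size: minimum size of the binding, or 0 to use the object/view size
 * @alignment: required alignment of the binding, or 0 for the default
 * @flags: PIN_* placement flags
 *
 * Looks up (or creates) the vma for @obj in the global GTT, unbinds it
 * first if it is misplaced for the new constraints, and then pins it with
 * PIN_GLOBAL added to @flags.
 *
 * Minimal usage sketch (illustrative only; "view" stands for a
 * caller-provided struct i915_ggtt_view):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... access the object through the aperture ...
 *	i915_vma_unpin(vma);
 *
 * Returns the pinned vma on success or an ERR_PTR() on failure.
 */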
3834 struct i915_vma *
3835 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3836                          const struct i915_ggtt_view *view,
3837                          u64 size,
3838                          u64 alignment,
3839                          u64 flags)
3840 {
3841         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3842         struct i915_address_space *vm = &dev_priv->ggtt.base;
3843         struct i915_vma *vma;
3844         int ret;
3845
3846         vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3847         if (IS_ERR(vma))
3848                 return vma;
3849
3850         if (i915_vma_misplaced(vma, size, alignment, flags)) {
3851                 if (flags & PIN_NONBLOCK &&
3852                     (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3853                         return ERR_PTR(-ENOSPC);
3854
3855                 if (flags & PIN_MAPPABLE) {
3856                         u32 fence_size;
3857
3858                         fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3859                                                             i915_gem_object_get_tiling(obj));
3860                         /* If the required space is larger than the available
3861                          * aperture, we will not be able to find a slot for the
3862                          * object and unbinding the object now will be in
3863                          * vain. Worse, doing so may cause us to ping-pong
3864                          * the object in and out of the Global GTT and
3865                          * waste a lot of cycles under the mutex.
3866                          */
3867                         if (fence_size > dev_priv->ggtt.mappable_end)
3868                                 return ERR_PTR(-E2BIG);
3869
3870                         /* If NONBLOCK is set the caller is optimistically
3871                          * trying to cache the full object within the mappable
3872                          * aperture, and *must* have a fallback in place for
3873                          * situations where we cannot bind the object. We
3874                          * can be a little more lax here and use the fallback
3875                          * more often to avoid costly migrations of ourselves
3876                          * and other objects within the aperture.
3877                          *
3878                          * Half-the-aperture is used as a simple heuristic.
3879                          * More interesting would be to do a search for a free
3880                          * block prior to making the commitment to unbind.
3881                          * That caters for the self-harm case, and with a
3882                          * little more heuristics (e.g. NOFAULT, NOEVICT)
3883                          * we could try to minimise harm to others.
3884                          */
3885                         if (flags & PIN_NONBLOCK &&
3886                             fence_size > dev_priv->ggtt.mappable_end / 2)
3887                                 return ERR_PTR(-ENOSPC);
3888                 }
3889
3890                 WARN(i915_vma_is_pinned(vma),
3891                      "bo is already pinned in ggtt with incorrect alignment:"
3892                      " offset=%08x, req.alignment=%llx,"
3893                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3894                      i915_ggtt_offset(vma), alignment,
3895                      !!(flags & PIN_MAPPABLE),
3896                      i915_vma_is_map_and_fenceable(vma));
3897                 ret = i915_vma_unbind(vma);
3898                 if (ret)
3899                         return ERR_PTR(ret);
3900         }
3901
3902         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3903         if (ret)
3904                 return ERR_PTR(ret);
3905
3906         return vma;
3907 }
3908
3909 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3910 {
3911         /* Note that we could alias engines in the execbuf API, but
3912          * that would be very unwise as it prevents userspace from
3913          * fine control over engine selection. Ahem.
3914          *
3915          * This should be something like EXEC_MAX_ENGINE instead of
3916          * I915_NUM_ENGINES.
3917          */
3918         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3919         return 0x10000 << id;
3920 }
3921
3922 static __always_inline unsigned int __busy_write_id(unsigned int id)
3923 {
3924         /* The uABI guarantees an active writer is also amongst the read
3925          * engines. This would be true if we accessed the activity tracking
3926          * under the lock, but as we perform the lookup of the object and
3927          * its activity locklessly we can not guarantee that the last_write
3928          * being active implies that we have set the same engine flag from
3929          * last_read - hence we always set both read and write busy for
3930          * last_write.
3931          */
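        /* Worked example (illustrative): for engine id 2,
         * __busy_read_flag(2) is 0x10000 << 2 == 0x40000 and the value
         * returned here is 2 | 0x40000 == 0x40002, i.e. the write id
         * always carries the matching read flag.
         */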
3932         return id | __busy_read_flag(id);
3933 }
3934
3935 static __always_inline unsigned int
3936 __busy_set_if_active(const struct i915_gem_active *active,
3937                      unsigned int (*flag)(unsigned int id))
3938 {
3939         struct drm_i915_gem_request *request;
3940
3941         request = rcu_dereference(active->request);
3942         if (!request || i915_gem_request_completed(request))
3943                 return 0;
3944
3945         /* This is racy. See __i915_gem_active_get_rcu() for a detailed
3946          * discussion of how to handle the race correctly, but for reporting
3947          * the busy state we err on the side of potentially reporting the
3948          * wrong engine as being busy (but we guarantee that the result
3949          * is at least self-consistent).
3950          *
3951          * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
3952          * whilst we are inspecting it, even under the RCU read lock as we are.
3953          * This means that there is a small window for the engine and/or the
3954          * seqno to have been overwritten. The seqno will always be in the
3955          * future compared to the intended, and so we know that if that
3956          * seqno is idle (on whatever engine) our request is idle and the
3957          * return 0 above is correct.
3958          *
3959          * The issue is that if the engine is switched, it is just as likely
3960          * to report that it is busy (but since the switch happened, we know
3961          * the request should be idle). So there is a small chance that a busy
3962          * result is actually the wrong engine.
3963          *
3964          * So why don't we care?
3965          *
3966          * For starters, the busy ioctl is a heuristic that is by definition
3967          * racy. Even with perfect serialisation in the driver, the hardware
3968          * state is constantly advancing - the state we report to the user
3969          * is stale.
3970          *
3971          * The critical information for the busy-ioctl is whether the object
3972          * is idle as userspace relies on that to detect whether its next
3973          * access will stall, or if it has missed submitting commands to
3974          * the hardware allowing the GPU to stall. We never generate a
3975          * false-positive for idleness, thus busy-ioctl is reliable at the
3976          * most fundamental level, and we maintain the guarantee that a
3977          * busy object left to itself will eventually become idle (and stay
3978          * idle!).
3979          *
3980          * We allow ourselves the leeway of potentially misreporting the busy
3981          * state because that is an optimisation heuristic that is constantly
3982          * in flux. Being quickly able to detect the busy/idle state is much
3983          * more important than accurate logging of exactly which engines were
3984          * busy.
3985          *
3986          * For accuracy in reporting the engine, we could use
3987          *
3988          *      result = 0;
3989          *      request = __i915_gem_active_get_rcu(active);
3990          *      if (request) {
3991          *              if (!i915_gem_request_completed(request))
3992          *                      result = flag(request->engine->exec_id);
3993          *              i915_gem_request_put(request);
3994          *      }
3995          *
3996          * but that still remains susceptible to both hardware and userspace
3997          * races. So we accept making the result of that race slightly worse,
3998          * given the rarity of the race and its low impact on the result.
3999          */
4000         return flag(READ_ONCE(request->engine->exec_id));
4001 }
4002
4003 static __always_inline unsigned int
4004 busy_check_reader(const struct i915_gem_active *active)
4005 {
4006         return __busy_set_if_active(active, __busy_read_flag);
4007 }
4008
4009 static __always_inline unsigned int
4010 busy_check_writer(const struct i915_gem_active *active)
4011 {
4012         return __busy_set_if_active(active, __busy_write_id);
4013 }
4014
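/* Report to userspace which engines, if any, are still reading from or
 * writing to the object. The check is deliberately lockless and racy;
 * the comments below explain why a stale answer is acceptable.
 */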
4015 int
4016 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4017                     struct drm_file *file)
4018 {
4019         struct drm_i915_gem_busy *args = data;
4020         struct drm_i915_gem_object *obj;
4021         unsigned long active;
4022
4023         obj = i915_gem_object_lookup(file, args->handle);
4024         if (!obj)
4025                 return -ENOENT;
4026
4027         args->busy = 0;
4028         active = __I915_BO_ACTIVE(obj);
4029         if (active) {
4030                 int idx;
4031
4032                 /* Yes, the lookups are intentionally racy.
4033                  *
4034                  * First, we cannot simply rely on __I915_BO_ACTIVE. We have
4035                  * to regard the value as stale and as our ABI guarantees
4036                  * forward progress, we confirm the status of each active
4037                  * request with the hardware.
4038                  *
4039                  * Even though we guard the pointer lookup by RCU, that only
4040                  * guarantees that the pointer and its contents remain
4041                  * dereferencable and does *not* mean that the request we
4042                  * have is the same as the one being tracked by the object.
4043                  *
4044                  * Consider that we lookup the request just as it is being
4045                  * retired and freed. We take a local copy of the pointer,
4046                  * but before we add its engine into the busy set, the other
4047                  * thread reallocates it and assigns it to a task on another
4048                  * engine with a fresh and incomplete seqno. Guarding against
4049                  * that requires careful serialisation and reference counting,
4050                  * i.e. using __i915_gem_active_get_request_rcu(). We don't,
4051                  * instead we expect that if the result is busy, which engines
4052                  * are busy is not completely reliable - we only guarantee
4053                  * that the object was busy.
4054                  */
4055                 rcu_read_lock();
4056
4057                 for_each_active(active, idx)
4058                         args->busy |= busy_check_reader(&obj->last_read[idx]);
4059
4060                 /* For ABI sanity, we only care that the write engine is in
4061                  * the set of read engines. This should be ensured by the
4062                  * ordering of setting last_read/last_write in
4063                  * i915_vma_move_to_active(), and then in reverse in retire.
4064                  * However, for good measure, we always report the last_write
4065                  * request as a busy read as well as being a busy write.
4066                  *
4067                  * We don't care that the set of active read/write engines
4068                  * may change during construction of the result, as it is
4069                  * equally liable to change before userspace can inspect
4070                  * the result.
4071                  */
4072                 args->busy |= busy_check_writer(&obj->last_write);
4073
4074                 rcu_read_unlock();
4075         }
4076
4077         i915_gem_object_put_unlocked(obj);
4078         return 0;
4079 }
4080
4081 int
4082 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4083                         struct drm_file *file_priv)
4084 {
4085         return i915_gem_ring_throttle(dev, file_priv);
4086 }
4087
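/* Let userspace advise whether an object's backing storage is needed
 * (I915_MADV_WILLNEED) or may be discarded (I915_MADV_DONTNEED). If a
 * DONTNEED object has no pages attached, its backing storage is truncated
 * immediately; args->retained reports whether the backing store has not
 * yet been purged.
 */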
4088 int
4089 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4090                        struct drm_file *file_priv)
4091 {
4092         struct drm_i915_private *dev_priv = to_i915(dev);
4093         struct drm_i915_gem_madvise *args = data;
4094         struct drm_i915_gem_object *obj;
4095         int ret;
4096
4097         switch (args->madv) {
4098         case I915_MADV_DONTNEED:
4099         case I915_MADV_WILLNEED:
4100             break;
4101         default:
4102             return -EINVAL;
4103         }
4104
4105         ret = i915_mutex_lock_interruptible(dev);
4106         if (ret)
4107                 return ret;
4108
4109         obj = i915_gem_object_lookup(file_priv, args->handle);
4110         if (!obj) {
4111                 ret = -ENOENT;
4112                 goto unlock;
4113         }
4114
4115         if (obj->pages &&
4116             i915_gem_object_is_tiled(obj) &&
4117             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4118                 if (obj->madv == I915_MADV_WILLNEED)
4119                         i915_gem_object_unpin_pages(obj);
4120                 if (args->madv == I915_MADV_WILLNEED)
4121                         i915_gem_object_pin_pages(obj);
4122         }
4123
4124         if (obj->madv != __I915_MADV_PURGED)
4125                 obj->madv = args->madv;
4126
4127         /* if the object is no longer attached, discard its backing storage */
4128         if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4129                 i915_gem_object_truncate(obj);
4130
4131         args->retained = obj->madv != __I915_MADV_PURGED;
4132
4133         i915_gem_object_put(obj);
4134 unlock:
4135         mutex_unlock(&dev->struct_mutex);
4136         return ret;
4137 }
4138
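/* Initialise the common GEM object state (lists, request trackers,
 * frontbuffer origin and madvise status) and account for its size.
 */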
4139 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4140                           const struct drm_i915_gem_object_ops *ops)
4141 {
4142         int i;
4143
4144         INIT_LIST_HEAD(&obj->global_list);
4145         INIT_LIST_HEAD(&obj->userfault_link);
4146         for (i = 0; i < I915_NUM_ENGINES; i++)
4147                 init_request_active(&obj->last_read[i],
4148                                     i915_gem_object_retire__read);
4149         init_request_active(&obj->last_write,
4150                             i915_gem_object_retire__write);
4151         INIT_LIST_HEAD(&obj->obj_exec_link);
4152         INIT_LIST_HEAD(&obj->vma_list);
4153         INIT_LIST_HEAD(&obj->batch_pool_link);
4154
4155         obj->ops = ops;
4156
4157         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4158         obj->madv = I915_MADV_WILLNEED;
4159
4160         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4161 }
4162
4163 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4164         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4165         .get_pages = i915_gem_object_get_pages_gtt,
4166         .put_pages = i915_gem_object_put_pages_gtt,
4167 };
4168
4169 /* Note we don't consider signbits :| */
4170 #define overflows_type(x, T) \
4171         (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
4172
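/* Allocate a new GEM object of @size bytes backed by shmemfs, starting in
 * the CPU read/write domains with a cache level chosen from HAS_LLC().
 * Returns the object or an ERR_PTR() on failure.
 */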
4173 struct drm_i915_gem_object *
4174 i915_gem_object_create(struct drm_device *dev, u64 size)
4175 {
4176         struct drm_i915_gem_object *obj;
4177         struct address_space *mapping;
4178         gfp_t mask;
4179         int ret;
4180
4181         /* There is a prevalence of the assumption that we fit the object's
4182          * page count inside a 32bit _signed_ variable. Let's document this and
4183          * catch if we ever need to fix it. In the meantime, if you do spot
4184          * such a local variable, please consider fixing!
4185          */
4186         if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
4187                 return ERR_PTR(-E2BIG);
4188
4189         if (overflows_type(size, obj->base.size))
4190                 return ERR_PTR(-E2BIG);
4191
4192         obj = i915_gem_object_alloc(dev);
4193         if (obj == NULL)
4194                 return ERR_PTR(-ENOMEM);
4195
4196         ret = drm_gem_object_init(dev, &obj->base, size);
4197         if (ret)
4198                 goto fail;
4199
4200         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4201         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4202                 /* 965gm cannot relocate objects above 4GiB. */
4203                 mask &= ~__GFP_HIGHMEM;
4204                 mask |= __GFP_DMA32;
4205         }
4206
4207         mapping = obj->base.filp->f_mapping;
4208         mapping_set_gfp_mask(mapping, mask);
4209
4210         i915_gem_object_init(obj, &i915_gem_object_ops);
4211
4212         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4213         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4214
4215         if (HAS_LLC(dev)) {
4216                 /* On some devices, we can have the GPU use the LLC (the CPU
4217                  * cache) for about a 10% performance improvement
4218                  * compared to uncached.  Graphics requests other than
4219                  * display scanout are coherent with the CPU in
4220                  * accessing this cache.  This means in this mode we
4221                  * don't need to clflush on the CPU side, and on the
4222                  * GPU side we only need to flush internal caches to
4223                  * get data visible to the CPU.
4224                  *
4225                  * However, we maintain the display planes as UC, and so
4226                  * need to rebind when first used as such.
4227                  */
4228                 obj->cache_level = I915_CACHE_LLC;
4229         } else
4230                 obj->cache_level = I915_CACHE_NONE;
4231
4232         trace_i915_gem_object_create(obj);
4233
4234         return obj;
4235
4236 fail:
4237         i915_gem_object_free(obj);
4238
4239         return ERR_PTR(ret);
4240 }
4241
4242 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4243 {
4244         /* If we are the last user of the backing storage (be it shmemfs
4245          * pages or stolen etc), we know that the pages are going to be
4246          * immediately released. In this case, we can then skip copying
4247          * back the contents from the GPU.
4248          */
4249
4250         if (obj->madv != I915_MADV_WILLNEED)
4251                 return false;
4252
4253         if (obj->base.filp == NULL)
4254                 return true;
4255
4256         /* At first glance, this looks racy, but then again so would be
4257          * userspace racing mmap against close. However, the first external
4258          * reference to the filp can only be obtained through the
4259          * i915_gem_mmap_ioctl() which safeguards us against the user
4260          * acquiring such a reference whilst we are in the middle of
4261          * freeing the object.
4262          */
4263         return atomic_long_read(&obj->base.filp->f_count) == 1;
4264 }
4265
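/* Final unreference of a GEM object: close any remaining GGTT vma,
 * release the pages and backing storage, and free the object itself.
 */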
4266 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4267 {
4268         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4269         struct drm_device *dev = obj->base.dev;
4270         struct drm_i915_private *dev_priv = to_i915(dev);
4271         struct i915_vma *vma, *next;
4272
4273         intel_runtime_pm_get(dev_priv);
4274
4275         trace_i915_gem_object_destroy(obj);
4276
4277         /* All file-owned VMA should have been released by this point through
4278          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4279          * However, the object may also be bound into the global GTT (e.g.
4280          * older GPUs without per-process support, or for direct access through
4281          * the GTT either for the user or for scanout). Those VMA still need to
4282          * be unbound now.
4283          */
4284         list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4285                 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4286                 GEM_BUG_ON(i915_vma_is_active(vma));
4287                 vma->flags &= ~I915_VMA_PIN_MASK;
4288                 i915_vma_close(vma);
4289         }
4290         GEM_BUG_ON(obj->bind_count);
4291
4292         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4293          * before progressing. */
4294         if (obj->stolen)
4295                 i915_gem_object_unpin_pages(obj);
4296
4297         WARN_ON(atomic_read(&obj->frontbuffer_bits));
4298
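        /* Tiled objects on machines with the L-shaped memory quirk keep their
         * pages pinned (QUIRK_PIN_SWIZZLED_PAGES) so that the bit-17 swizzled
         * layout is not lost to swap; drop that extra pin before the pages
         * are released for the last time.
         */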
4299         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4300             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4301             i915_gem_object_is_tiled(obj))
4302                 i915_gem_object_unpin_pages(obj);
4303
4304         if (WARN_ON(obj->pages_pin_count))
4305                 obj->pages_pin_count = 0;
4306         if (discard_backing_storage(obj))
4307                 obj->madv = I915_MADV_DONTNEED;
4308         i915_gem_object_put_pages(obj);
4309
4310         BUG_ON(obj->pages);
4311
4312         if (obj->base.import_attach)
4313                 drm_prime_gem_destroy(&obj->base, NULL);
4314
4315         if (obj->ops->release)
4316                 obj->ops->release(obj);
4317
4318         drm_gem_object_release(&obj->base);
4319         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4320
4321         kfree(obj->bit_17);
4322         i915_gem_object_free(obj);
4323
4324         intel_runtime_pm_put(dev_priv);
4325 }
4326
4327 int i915_gem_suspend(struct drm_device *dev)
4328 {
4329         struct drm_i915_private *dev_priv = to_i915(dev);
4330         int ret;
4331
4332         intel_suspend_gt_powersave(dev_priv);
4333
4334         mutex_lock(&dev->struct_mutex);
4335
4336         /* We have to flush all the executing contexts to main memory so
4337          * that they can be saved in the hibernation image. To ensure the last
4338          * context image is coherent, we have to switch away from it. That
4339          * leaves the dev_priv->kernel_context still active when
4340          * we actually suspend, and its image in memory may not match the GPU
4341          * state. Fortunately, the kernel_context is disposable and we do
4342          * not rely on its state.
4343          */
4344         ret = i915_gem_switch_to_kernel_context(dev_priv);
4345         if (ret)
4346                 goto err;
4347
4348         ret = i915_gem_wait_for_idle(dev_priv,
4349                                      I915_WAIT_INTERRUPTIBLE |
4350                                      I915_WAIT_LOCKED);
4351         if (ret)
4352                 goto err;
4353
4354         i915_gem_retire_requests(dev_priv);
4355
4356         i915_gem_context_lost(dev_priv);
4357         mutex_unlock(&dev->struct_mutex);
4358
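        /* The GPU is idle and all requests retired, so stop the background
         * hangcheck and retire workers, and wait for the idle worker to park
         * the GPU (clearing dev_priv->gt.awake).
         */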
4359         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4360         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4361         flush_delayed_work(&dev_priv->gt.idle_work);
4362
4363         /* Assert that we successfully flushed all the work and
4364          * reset the GPU back to its idle, low power state.
4365          */
4366         WARN_ON(dev_priv->gt.awake);
4367
4368         /*
4369          * Neither the BIOS, ourselves nor any other kernel
4370          * expects the system to be in execlists mode on startup,
4371          * so we need to reset the GPU back to legacy mode. And the only
4372          * known way to disable logical contexts is through a GPU reset.
4373          *
4374          * So in order to leave the system in a known default configuration,
4375          * always reset the GPU upon unload and suspend. Afterwards we then
4376          * clean up the GEM state tracking, flushing off the requests and
4377          * leaving the system in a known idle state.
4378          *
4379          * Note that it is of the utmost importance that the GPU is idle and
4380          * all stray writes are flushed *before* we dismantle the backing
4381          * storage for the pinned objects.
4382          *
4383          * However, since we are uncertain that resetting the GPU on older
4384          * machines is a good idea, we don't - just in case it leaves the
4385          * machine in an unusable condition.
4386          */
4387         if (HAS_HW_CONTEXTS(dev)) {
4388                 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4389                 WARN_ON(reset && reset != -ENODEV);
4390         }
4391
4392         return 0;
4393
4394 err:
4395         mutex_unlock(&dev->struct_mutex);
4396         return ret;
4397 }
4398
4399 void i915_gem_resume(struct drm_device *dev)
4400 {
4401         struct drm_i915_private *dev_priv = to_i915(dev);
4402
4403         mutex_lock(&dev->struct_mutex);
4404         i915_gem_restore_gtt_mappings(dev);
4405
4406         /* As we didn't flush the kernel context before suspend, we cannot
4407          * guarantee that the context image is complete. So let's just reset
4408          * it and start again.
4409          */
4410         dev_priv->gt.resume(dev_priv);
4411
4412         mutex_unlock(&dev->struct_mutex);
4413 }
4414
4415 void i915_gem_init_swizzling(struct drm_device *dev)
4416 {
4417         struct drm_i915_private *dev_priv = to_i915(dev);
4418
4419         if (INTEL_INFO(dev)->gen < 5 ||
4420             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4421                 return;
4422
4423         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4424                                  DISP_TILE_SURFACE_SWIZZLING);
4425
4426         if (IS_GEN5(dev_priv))
4427                 return;
4428
4429         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4430         if (IS_GEN6(dev_priv))
4431                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4432         else if (IS_GEN7(dev_priv))
4433                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4434         else if (IS_GEN8(dev_priv))
4435                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4436         else
4437                 BUG();
4438 }
4439
4440 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4441 {
4442         I915_WRITE(RING_CTL(base), 0);
4443         I915_WRITE(RING_HEAD(base), 0);
4444         I915_WRITE(RING_TAIL(base), 0);
4445         I915_WRITE(RING_START(base), 0);
4446 }
4447
4448 static void init_unused_rings(struct drm_i915_private *dev_priv)
4449 {
4450         if (IS_I830(dev_priv)) {
4451                 init_unused_ring(dev_priv, PRB1_BASE);
4452                 init_unused_ring(dev_priv, SRB0_BASE);
4453                 init_unused_ring(dev_priv, SRB1_BASE);
4454                 init_unused_ring(dev_priv, SRB2_BASE);
4455                 init_unused_ring(dev_priv, SRB3_BASE);
4456         } else if (IS_GEN2(dev_priv)) {
4457                 init_unused_ring(dev_priv, SRB0_BASE);
4458                 init_unused_ring(dev_priv, SRB1_BASE);
4459         } else if (IS_GEN3(dev_priv)) {
4460                 init_unused_ring(dev_priv, PRB1_BASE);
4461                 init_unused_ring(dev_priv, PRB2_BASE);
4462         }
4463 }
4464
4465 int
4466 i915_gem_init_hw(struct drm_device *dev)
4467 {
4468         struct drm_i915_private *dev_priv = to_i915(dev);
4469         struct intel_engine_cs *engine;
4470         enum intel_engine_id id;
4471         int ret;
4472
4473         /* Double layer security blanket, see i915_gem_init() */
4474         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4475
4476         if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4477                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4478
4479         if (IS_HASWELL(dev_priv))
4480                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4481                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4482
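        /* With no PCH present (PCH_NOP), disable the PCH reset handshake;
         * presumably there is nothing on the other end to acknowledge it.
         */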
4483         if (HAS_PCH_NOP(dev_priv)) {
4484                 if (IS_IVYBRIDGE(dev_priv)) {
4485                         u32 temp = I915_READ(GEN7_MSG_CTL);
4486                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4487                         I915_WRITE(GEN7_MSG_CTL, temp);
4488                 } else if (INTEL_INFO(dev)->gen >= 7) {
4489                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4490                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4491                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4492                 }
4493         }
4494
4495         i915_gem_init_swizzling(dev);
4496
4497         /*
4498          * At least 830 can leave some of the unused rings
4499          * "active" (ie. head != tail) after resume which
4500          * "active" (i.e. head != tail) after resume, which
4501          * will prevent C3 entry. Make sure all unused rings
4502          */
4503         init_unused_rings(dev_priv);
4504
4505         BUG_ON(!dev_priv->kernel_context);
4506
4507         ret = i915_ppgtt_init_hw(dev);
4508         if (ret) {
4509                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4510                 goto out;
4511         }
4512
4513         /* Need to do basic initialisation of all rings first: */
4514         for_each_engine(engine, dev_priv, id) {
4515                 ret = engine->init_hw(engine);
4516                 if (ret)
4517                         goto out;
4518         }
4519
4520         intel_mocs_init_l3cc_table(dev);
4521
4522         /* We can't enable contexts until all firmware is loaded */
4523         ret = intel_guc_setup(dev);
4524         if (ret)
4525                 goto out;
4526
4527 out:
4528         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4529         return ret;
4530 }
4531
4532 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4533 {
4534         if (INTEL_INFO(dev_priv)->gen < 6)
4535                 return false;
4536
4537         /* TODO: make semaphores and Execlists play nicely together */
4538         if (i915.enable_execlists)
4539                 return false;
4540
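        /* A non-negative value is an explicit request from the module
         * parameter; honour it as given.
         */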
4541         if (value >= 0)
4542                 return value;
4543
4544 #ifdef CONFIG_INTEL_IOMMU
4545         /* Enable semaphores on SNB when IO remapping is off */
4546         if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4547                 return false;
4548 #endif
4549
4550         return true;
4551 }
4552
4553 int i915_gem_init(struct drm_device *dev)
4554 {
4555         struct drm_i915_private *dev_priv = to_i915(dev);
4556         int ret;
4557
4558         mutex_lock(&dev->struct_mutex);
4559
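        /* Select the submission backend up front: legacy ringbuffer
         * submission or execlists (logical ring contexts), as chosen by
         * i915.enable_execlists.
         */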
4560         if (!i915.enable_execlists) {
4561                 dev_priv->gt.resume = intel_legacy_submission_resume;
4562                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4563         } else {
4564                 dev_priv->gt.resume = intel_lr_context_resume;
4565                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4566         }
4567
4568         /* This is just a security blanket to placate dragons.
4569          * On some systems, we very sporadically observe that the first TLBs
4570          * used by the CS may be stale, despite us poking the TLB reset. If
4571          * we hold the forcewake during initialisation these problems
4572          * just magically go away.
4573          */
4574         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4575
4576         i915_gem_init_userptr(dev_priv);
4577
4578         ret = i915_gem_init_ggtt(dev_priv);
4579         if (ret)
4580                 goto out_unlock;
4581
4582         ret = i915_gem_context_init(dev);
4583         if (ret)
4584                 goto out_unlock;
4585
4586         ret = intel_engines_init(dev);
4587         if (ret)
4588                 goto out_unlock;
4589
4590         ret = i915_gem_init_hw(dev);
4591         if (ret == -EIO) {
4592                 /* Allow engine initialisation to fail by marking the GPU as
4593                  * wedged. But we only want to do this where the GPU is angry,
4594                  * wedged. But we only want to do this where the GPU is angry;
4595                  * for any other failure, such as an allocation failure, bail.
4596                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4597                 i915_gem_set_wedged(dev_priv);
4598                 ret = 0;
4599         }
4600
4601 out_unlock:
4602         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4603         mutex_unlock(&dev->struct_mutex);
4604
4605         return ret;
4606 }
4607
4608 void
4609 i915_gem_cleanup_engines(struct drm_device *dev)
4610 {
4611         struct drm_i915_private *dev_priv = to_i915(dev);
4612         struct intel_engine_cs *engine;
4613         enum intel_engine_id id;
4614
4615         for_each_engine(engine, dev_priv, id)
4616                 dev_priv->gt.cleanup_engine(engine);
4617 }
4618
4619 void
4620 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4621 {
4622         struct drm_device *dev = &dev_priv->drm;
4623         int i;
4624
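        /* The number of hardware fence registers varies by platform:
         * 32 on gen7+ (except Valleyview/Cherryview), 16 on gen4+ and a few
         * gen3 parts, and 8 everywhere else.
         */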
4625         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4626             !IS_CHERRYVIEW(dev_priv))
4627                 dev_priv->num_fence_regs = 32;
4628         else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4629                  IS_I945GM(dev_priv) || IS_G33(dev_priv))
4630                 dev_priv->num_fence_regs = 16;
4631         else
4632                 dev_priv->num_fence_regs = 8;
4633
4634         if (intel_vgpu_active(dev_priv))
4635                 dev_priv->num_fence_regs =
4636                                 I915_READ(vgtif_reg(avail_rs.fence_num));
4637
4638         /* Initialize fence registers to zero */
4639         for (i = 0; i < dev_priv->num_fence_regs; i++) {
4640                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4641
4642                 fence->i915 = dev_priv;
4643                 fence->id = i;
4644                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4645         }
4646         i915_gem_restore_fences(dev);
4647
4648         i915_gem_detect_bit_6_swizzle(dev);
4649 }
4650
4651 void
4652 i915_gem_load_init(struct drm_device *dev)
4653 {
4654         struct drm_i915_private *dev_priv = to_i915(dev);
4655
4656         dev_priv->objects =
4657                 kmem_cache_create("i915_gem_object",
4658                                   sizeof(struct drm_i915_gem_object), 0,
4659                                   SLAB_HWCACHE_ALIGN,
4660                                   NULL);
4661         dev_priv->vmas =
4662                 kmem_cache_create("i915_gem_vma",
4663                                   sizeof(struct i915_vma), 0,
4664                                   SLAB_HWCACHE_ALIGN,
4665                                   NULL);
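        /* Requests are freed under RCU (SLAB_DESTROY_BY_RCU) so that they can
         * be looked up without holding a reference; see the rcu_barrier() in
         * i915_gem_load_cleanup().
         */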
4666         dev_priv->requests =
4667                 kmem_cache_create("i915_gem_request",
4668                                   sizeof(struct drm_i915_gem_request), 0,
4669                                   SLAB_HWCACHE_ALIGN |
4670                                   SLAB_RECLAIM_ACCOUNT |
4671                                   SLAB_DESTROY_BY_RCU,
4672                                   NULL);
4673
4674         INIT_LIST_HEAD(&dev_priv->context_list);
4675         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4676         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4677         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4678         INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
4679         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4680                           i915_gem_retire_work_handler);
4681         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4682                           i915_gem_idle_work_handler);
4683         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4684         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4685
4686         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4687
4688         init_waitqueue_head(&dev_priv->pending_flip_queue);
4689
4690         dev_priv->mm.interruptible = true;
4691
4692         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4693
4694         spin_lock_init(&dev_priv->fb_tracking.lock);
4695 }
4696
4697 void i915_gem_load_cleanup(struct drm_device *dev)
4698 {
4699         struct drm_i915_private *dev_priv = to_i915(dev);
4700
4701         kmem_cache_destroy(dev_priv->requests);
4702         kmem_cache_destroy(dev_priv->vmas);
4703         kmem_cache_destroy(dev_priv->objects);
4704
4705         /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4706         rcu_barrier();
4707 }
4708
4709 int i915_gem_freeze(struct drm_i915_private *dev_priv)
4710 {
4711         intel_runtime_pm_get(dev_priv);
4712
4713         mutex_lock(&dev_priv->drm.struct_mutex);
4714         i915_gem_shrink_all(dev_priv);
4715         mutex_unlock(&dev_priv->drm.struct_mutex);
4716
4717         intel_runtime_pm_put(dev_priv);
4718
4719         return 0;
4720 }
4721
4722 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4723 {
4724         struct drm_i915_gem_object *obj;
4725         struct list_head *phases[] = {
4726                 &dev_priv->mm.unbound_list,
4727                 &dev_priv->mm.bound_list,
4728                 NULL
4729         }, **p;
4730
4731         /* Called just before we write the hibernation image.
4732          *
4733          * We need to update the domain tracking to reflect that the CPU
4734          * will be accessing all the pages to create and restore from the
4735          * hibernation, and so upon restoration those pages will be in the
4736          * CPU domain.
4737          *
4738          * To make sure the hibernation image contains the latest state,
4739          * we update that state just before writing out the image.
4740          *
4741          * To try and reduce the hibernation image, we manually shrink
4742          * the objects as well.
4743          */
4744
4745         mutex_lock(&dev_priv->drm.struct_mutex);
4746         i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
4747
4748         for (p = phases; *p; p++) {
4749                 list_for_each_entry(obj, *p, global_list) {
4750                         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4751                         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4752                 }
4753         }
4754         mutex_unlock(&dev_priv->drm.struct_mutex);
4755
4756         return 0;
4757 }
4758
4759 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4760 {
4761         struct drm_i915_file_private *file_priv = file->driver_priv;
4762         struct drm_i915_gem_request *request;
4763
4764         /* Clean up our request list when the client is going away, so that
4765          * later retire_requests won't dereference our soon-to-be-gone
4766          * file_priv.
4767          */
4768         spin_lock(&file_priv->mm.lock);
4769         list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4770                 request->file_priv = NULL;
4771         spin_unlock(&file_priv->mm.lock);
4772
4773         if (!list_empty(&file_priv->rps.link)) {
4774                 spin_lock(&to_i915(dev)->rps.client_lock);
4775                 list_del(&file_priv->rps.link);
4776                 spin_unlock(&to_i915(dev)->rps.client_lock);
4777         }
4778 }
4779
4780 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4781 {
4782         struct drm_i915_file_private *file_priv;
4783         int ret;
4784
4785         DRM_DEBUG_DRIVER("\n");
4786
4787         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4788         if (!file_priv)
4789                 return -ENOMEM;
4790
4791         file->driver_priv = file_priv;
4792         file_priv->dev_priv = to_i915(dev);
4793         file_priv->file = file;
4794         INIT_LIST_HEAD(&file_priv->rps.link);
4795
4796         spin_lock_init(&file_priv->mm.lock);
4797         INIT_LIST_HEAD(&file_priv->mm.request_list);
4798
4799         file_priv->bsd_engine = -1;
4800
4801         ret = i915_gem_context_open(dev, file);
4802         if (ret)
4803                 kfree(file_priv);
4804
4805         return ret;
4806 }
4807
4808 /**
4809  * i915_gem_track_fb - update frontbuffer tracking
4810  * @old: current GEM buffer for the frontbuffer slots
4811  * @new: new GEM buffer for the frontbuffer slots
4812  * @frontbuffer_bits: bitmask of frontbuffer slots
4813  *
4814  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4815  * from @old and setting them in @new. Both @old and @new can be NULL.
4816  */
4817 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4818                        struct drm_i915_gem_object *new,
4819                        unsigned frontbuffer_bits)
4820 {
4821         /* Control of individual bits within the mask is guarded by
4822          * the owning plane->mutex, i.e. we can never see concurrent
4823          * manipulation of individual bits. But since the bitfield as a whole
4824          * is updated using RMW, we need to use atomics in order to update
4825          * the bits.
4826          */
4827         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4828                      sizeof(atomic_t) * BITS_PER_BYTE);
4829
4830         if (old) {
4831                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4832                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4833         }
4834
4835         if (new) {
4836                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4837                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4838         }
4839 }
4840
4841 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4842 struct page *
4843 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4844 {
4845         struct page *page;
4846
4847         /* Only default objects have per-page dirty tracking */
4848         if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4849                 return NULL;
4850
4851         page = i915_gem_object_get_page(obj, n);
4852         set_page_dirty(page);
4853         return page;
4854 }
4855
4856 /* Allocate a new GEM object and fill it with the supplied data */
4857 struct drm_i915_gem_object *
4858 i915_gem_object_create_from_data(struct drm_device *dev,
4859                                  const void *data, size_t size)
4860 {
4861         struct drm_i915_gem_object *obj;
4862         struct sg_table *sg;
4863         size_t bytes;
4864         int ret;
4865
4866         obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4867         if (IS_ERR(obj))
4868                 return obj;
4869
4870         ret = i915_gem_object_set_to_cpu_domain(obj, true);
4871         if (ret)
4872                 goto fail;
4873
4874         ret = i915_gem_object_get_pages(obj);
4875         if (ret)
4876                 goto fail;
4877
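        /* Keep the pages pinned while streaming in the data so that the
         * shrinker cannot release them mid-copy.
         */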
4878         i915_gem_object_pin_pages(obj);
4879         sg = obj->pages;
4880         bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4881         obj->dirty = 1;         /* Backing store is now out of date */
4882         i915_gem_object_unpin_pages(obj);
4883
4884         if (WARN_ON(bytes != size)) {
4885                 DRM_ERROR("Incomplete copy, wrote %zu of %zu\n", bytes, size);
4886                 ret = -EFAULT;
4887                 goto fail;
4888         }
4889
4890         return obj;
4891
4892 fail:
4893         i915_gem_object_put(obj);
4894         return ERR_PTR(ret);
4895 }