drm/i915: Prevent lock-cycles between GPU waits and GPU resets
drivers/gpu/drm/i915/i915_gem.c (linux-2.6-microblaze.git)
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drm_vma_manager.h>
29 #include <drm/i915_drm.h>
30 #include <linux/dma-fence-array.h>
31 #include <linux/kthread.h>
32 #include <linux/reservation.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/stop_machine.h>
36 #include <linux/swap.h>
37 #include <linux/pci.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mman.h>
40
41 #include "gem/i915_gem_clflush.h"
42 #include "gem/i915_gem_context.h"
43 #include "gem/i915_gem_ioctls.h"
44 #include "gem/i915_gem_pm.h"
45 #include "gem/i915_gemfs.h"
46 #include "gt/intel_engine_pm.h"
47 #include "gt/intel_gt_pm.h"
48 #include "gt/intel_mocs.h"
49 #include "gt/intel_reset.h"
50 #include "gt/intel_workarounds.h"
51
52 #include "i915_drv.h"
53 #include "i915_scatterlist.h"
54 #include "i915_trace.h"
55 #include "i915_vgpu.h"
56
57 #include "intel_display.h"
58 #include "intel_drv.h"
59 #include "intel_frontbuffer.h"
60 #include "intel_pm.h"
61
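/*
 * Reserve a node low in the mappable range of the GGTT so that individual
 * pages can be bound through the aperture for CPU access (see the GTT
 * pread/pwrite slow paths below).
 */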
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
79 int
80 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
81                             struct drm_file *file)
82 {
83         struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
84         struct drm_i915_gem_get_aperture *args = data;
85         struct i915_vma *vma;
86         u64 pinned;
87
88         mutex_lock(&ggtt->vm.mutex);
89
90         pinned = ggtt->vm.reserved;
91         list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
92                 if (i915_vma_is_pinned(vma))
93                         pinned += vma->node.size;
94
95         mutex_unlock(&ggtt->vm.mutex);
96
97         args->aper_size = ggtt->vm.total;
98         args->aper_available_size = args->aper_size - pinned;
99
100         return 0;
101 }
102
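/*
 * Unbind every vma currently bound to this object. The vma list spinlock
 * is dropped around each i915_vma_unbind() call (which may sleep), so
 * already-processed vmas are parked on a local list and spliced back in
 * before returning.
 */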
103 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
104 {
105         struct i915_vma *vma;
106         LIST_HEAD(still_in_list);
107         int ret = 0;
108
109         lockdep_assert_held(&obj->base.dev->struct_mutex);
110
111         spin_lock(&obj->vma.lock);
112         while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
113                                                        struct i915_vma,
114                                                        obj_link))) {
115                 list_move_tail(&vma->obj_link, &still_in_list);
116                 spin_unlock(&obj->vma.lock);
117
118                 ret = i915_vma_unbind(vma);
119
120                 spin_lock(&obj->vma.lock);
121         }
122         list_splice(&still_in_list, &obj->vma.list);
123         spin_unlock(&obj->vma.lock);
124
125         return ret;
126 }
127
128 static int
129 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
130                      struct drm_i915_gem_pwrite *args,
131                      struct drm_file *file)
132 {
133         void *vaddr = obj->phys_handle->vaddr + args->offset;
134         char __user *user_data = u64_to_user_ptr(args->data_ptr);
135
136         /* We manually control the domain here and pretend that it
137          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
138          */
139         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
140         if (copy_from_user(vaddr, user_data, args->size))
141                 return -EFAULT;
142
143         drm_clflush_virt_range(vaddr, args->size);
144         i915_gem_chipset_flush(to_i915(obj->base.dev));
145
146         intel_fb_obj_flush(obj, ORIGIN_CPU);
147         return 0;
148 }
149
150 static int
151 i915_gem_create(struct drm_file *file,
152                 struct drm_i915_private *dev_priv,
153                 u64 *size_p,
154                 u32 *handle_p)
155 {
156         struct drm_i915_gem_object *obj;
157         u32 handle;
158         u64 size;
159         int ret;
160
161         size = round_up(*size_p, PAGE_SIZE);
162         if (size == 0)
163                 return -EINVAL;
164
165         /* Allocate the new object */
166         obj = i915_gem_object_create_shmem(dev_priv, size);
167         if (IS_ERR(obj))
168                 return PTR_ERR(obj);
169
170         ret = drm_gem_handle_create(file, &obj->base, &handle);
171         /* drop reference from allocate - handle holds it now */
172         i915_gem_object_put(obj);
173         if (ret)
174                 return ret;
175
176         *handle_p = handle;
177         *size_p = size;
178         return 0;
179 }
180
181 int
182 i915_gem_dumb_create(struct drm_file *file,
183                      struct drm_device *dev,
184                      struct drm_mode_create_dumb *args)
185 {
186         int cpp = DIV_ROUND_UP(args->bpp, 8);
187         u32 format;
188
189         switch (cpp) {
190         case 1:
191                 format = DRM_FORMAT_C8;
192                 break;
193         case 2:
194                 format = DRM_FORMAT_RGB565;
195                 break;
196         case 4:
197                 format = DRM_FORMAT_XRGB8888;
198                 break;
199         default:
200                 return -EINVAL;
201         }
202
203         /* have to work out size/pitch and return them */
204         args->pitch = ALIGN(args->width * cpp, 64);
205
206         /* align stride to page size so that we can remap */
207         if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
208                                                     DRM_FORMAT_MOD_LINEAR))
209                 args->pitch = ALIGN(args->pitch, 4096);
210
211         args->size = args->pitch * args->height;
212         return i915_gem_create(file, to_i915(dev),
213                                &args->size, &args->handle);
214 }
215
216 /**
217  * Creates a new mm object and returns a handle to it.
218  * @dev: drm device pointer
219  * @data: ioctl data blob
220  * @file: drm file pointer
221  */
222 int
223 i915_gem_create_ioctl(struct drm_device *dev, void *data,
224                       struct drm_file *file)
225 {
226         struct drm_i915_private *dev_priv = to_i915(dev);
227         struct drm_i915_gem_create *args = data;
228
229         i915_gem_flush_free_objects(dev_priv);
230
231         return i915_gem_create(file, dev_priv,
232                                &args->size, &args->handle);
233 }
234
235 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
236 {
237         intel_wakeref_t wakeref;
238
239         /*
240          * No actual flushing is required for the GTT write domain for reads
241          * from the GTT domain. Writes to it "immediately" go to main memory
242          * as far as we know, so there's no chipset flush. It also doesn't
243          * land in the GPU render cache.
244          *
245          * However, we do have to enforce the order so that all writes through
246          * the GTT land before any writes to the device, such as updates to
247          * the GATT itself.
248          *
249          * We also have to wait a bit for the writes to land from the GTT.
250          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
251          * timing. This issue has only been observed when switching quickly
252          * between GTT writes and CPU reads from inside the kernel on recent hw,
253          * and it appears to only affect discrete GTT blocks (i.e. on LLC
254          * system agents we cannot reproduce this behaviour, until Cannonlake
255          * that was!).
256          */
257
258         wmb();
259
260         if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
261                 return;
262
263         i915_gem_chipset_flush(dev_priv);
264
265         with_intel_runtime_pm(dev_priv, wakeref) {
266                 spin_lock_irq(&dev_priv->uncore.lock);
267
268                 POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
269
270                 spin_unlock_irq(&dev_priv->uncore.lock);
271         }
272 }
273
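/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading from the page if
 * needs_clflush is set.
 */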
274 static int
275 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
276             bool needs_clflush)
277 {
278         char *vaddr;
279         int ret;
280
281         vaddr = kmap(page);
282
283         if (needs_clflush)
284                 drm_clflush_virt_range(vaddr + offset, len);
285
286         ret = __copy_to_user(user_data, vaddr + offset, len);
287
288         kunmap(page);
289
290         return ret ? -EFAULT : 0;
291 }
292
293 static int
294 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
295                      struct drm_i915_gem_pread *args)
296 {
297         unsigned int needs_clflush;
298         unsigned int idx, offset;
299         struct dma_fence *fence;
300         char __user *user_data;
301         u64 remain;
302         int ret;
303
304         ret = i915_gem_object_prepare_read(obj, &needs_clflush);
305         if (ret)
306                 return ret;
307
308         fence = i915_gem_object_lock_fence(obj);
309         i915_gem_object_finish_access(obj);
310         if (!fence)
311                 return -ENOMEM;
312
313         remain = args->size;
314         user_data = u64_to_user_ptr(args->data_ptr);
315         offset = offset_in_page(args->offset);
316         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
317                 struct page *page = i915_gem_object_get_page(obj, idx);
318                 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
319
320                 ret = shmem_pread(page, offset, length, user_data,
321                                   needs_clflush);
322                 if (ret)
323                         break;
324
325                 remain -= length;
326                 user_data += length;
327                 offset = 0;
328         }
329
330         i915_gem_object_unlock_fence(obj, fence);
331         return ret;
332 }
333
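/* Copy from a page in the GGTT aperture out to userspace, first via an
 * atomic WC mapping and, if that faults, again via a regular mapping
 * that can sleep while the user page is brought in.
 */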
334 static inline bool
335 gtt_user_read(struct io_mapping *mapping,
336               loff_t base, int offset,
337               char __user *user_data, int length)
338 {
339         void __iomem *vaddr;
340         unsigned long unwritten;
341
342         /* We can use the cpu mem copy function because this is X86. */
343         vaddr = io_mapping_map_atomic_wc(mapping, base);
344         unwritten = __copy_to_user_inatomic(user_data,
345                                             (void __force *)vaddr + offset,
346                                             length);
347         io_mapping_unmap_atomic(vaddr);
348         if (unwritten) {
349                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
350                 unwritten = copy_to_user(user_data,
351                                          (void __force *)vaddr + offset,
352                                          length);
353                 io_mapping_unmap(vaddr);
354         }
355         return unwritten;
356 }
357
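/* Slow pread path through the GGTT aperture: pin the object into the
 * mappable region if possible, otherwise bind it one page at a time via
 * a scratch node, and copy each page out to userspace.
 */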
358 static int
359 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
360                    const struct drm_i915_gem_pread *args)
361 {
362         struct drm_i915_private *i915 = to_i915(obj->base.dev);
363         struct i915_ggtt *ggtt = &i915->ggtt;
364         intel_wakeref_t wakeref;
365         struct drm_mm_node node;
366         struct dma_fence *fence;
367         void __user *user_data;
368         struct i915_vma *vma;
369         u64 remain, offset;
370         int ret;
371
372         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
373         if (ret)
374                 return ret;
375
376         wakeref = intel_runtime_pm_get(i915);
377         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
378                                        PIN_MAPPABLE |
379                                        PIN_NONFAULT |
380                                        PIN_NONBLOCK);
381         if (!IS_ERR(vma)) {
382                 node.start = i915_ggtt_offset(vma);
383                 node.allocated = false;
384                 ret = i915_vma_put_fence(vma);
385                 if (ret) {
386                         i915_vma_unpin(vma);
387                         vma = ERR_PTR(ret);
388                 }
389         }
390         if (IS_ERR(vma)) {
391                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
392                 if (ret)
393                         goto out_unlock;
394                 GEM_BUG_ON(!node.allocated);
395         }
396
397         mutex_unlock(&i915->drm.struct_mutex);
398
399         ret = i915_gem_object_lock_interruptible(obj);
400         if (ret)
401                 goto out_unpin;
402
403         ret = i915_gem_object_set_to_gtt_domain(obj, false);
404         if (ret) {
405                 i915_gem_object_unlock(obj);
406                 goto out_unpin;
407         }
408
409         fence = i915_gem_object_lock_fence(obj);
410         i915_gem_object_unlock(obj);
411         if (!fence) {
412                 ret = -ENOMEM;
413                 goto out_unpin;
414         }
415
416         user_data = u64_to_user_ptr(args->data_ptr);
417         remain = args->size;
418         offset = args->offset;
419
420         while (remain > 0) {
421                 /* Operation in this page
422                  *
423                  * page_base = page offset within aperture
424                  * page_offset = offset within page
425                  * page_length = bytes to copy for this page
426                  */
427                 u32 page_base = node.start;
428                 unsigned page_offset = offset_in_page(offset);
429                 unsigned page_length = PAGE_SIZE - page_offset;
430                 page_length = remain < page_length ? remain : page_length;
431                 if (node.allocated) {
432                         wmb();
433                         ggtt->vm.insert_page(&ggtt->vm,
434                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
435                                              node.start, I915_CACHE_NONE, 0);
436                         wmb();
437                 } else {
438                         page_base += offset & PAGE_MASK;
439                 }
440
441                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
442                                   user_data, page_length)) {
443                         ret = -EFAULT;
444                         break;
445                 }
446
447                 remain -= page_length;
448                 user_data += page_length;
449                 offset += page_length;
450         }
451
452         i915_gem_object_unlock_fence(obj, fence);
453 out_unpin:
454         mutex_lock(&i915->drm.struct_mutex);
455         if (node.allocated) {
456                 wmb();
457                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
458                 remove_mappable_node(&node);
459         } else {
460                 i915_vma_unpin(vma);
461         }
462 out_unlock:
463         intel_runtime_pm_put(i915, wakeref);
464         mutex_unlock(&i915->drm.struct_mutex);
465
466         return ret;
467 }
468
469 /**
470  * Reads data from the object referenced by handle.
471  * @dev: drm device pointer
472  * @data: ioctl data blob
473  * @file: drm file pointer
474  *
475  * On error, the contents of *data are undefined.
476  */
477 int
478 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
479                      struct drm_file *file)
480 {
481         struct drm_i915_gem_pread *args = data;
482         struct drm_i915_gem_object *obj;
483         int ret;
484
485         if (args->size == 0)
486                 return 0;
487
488         if (!access_ok(u64_to_user_ptr(args->data_ptr),
489                        args->size))
490                 return -EFAULT;
491
492         obj = i915_gem_object_lookup(file, args->handle);
493         if (!obj)
494                 return -ENOENT;
495
496         /* Bounds check source.  */
497         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
498                 ret = -EINVAL;
499                 goto out;
500         }
501
502         trace_i915_gem_object_pread(obj, args->offset, args->size);
503
504         ret = i915_gem_object_wait(obj,
505                                    I915_WAIT_INTERRUPTIBLE,
506                                    MAX_SCHEDULE_TIMEOUT);
507         if (ret)
508                 goto out;
509
510         ret = i915_gem_object_pin_pages(obj);
511         if (ret)
512                 goto out;
513
514         ret = i915_gem_shmem_pread(obj, args);
515         if (ret == -EFAULT || ret == -ENODEV)
516                 ret = i915_gem_gtt_pread(obj, args);
517
518         i915_gem_object_unpin_pages(obj);
519 out:
520         i915_gem_object_put(obj);
521         return ret;
522 }
523
524 /* This is the fast write path which cannot handle
525  * page faults in the source data
526  */
527
528 static inline bool
529 ggtt_write(struct io_mapping *mapping,
530            loff_t base, int offset,
531            char __user *user_data, int length)
532 {
533         void __iomem *vaddr;
534         unsigned long unwritten;
535
536         /* We can use the cpu mem copy function because this is X86. */
537         vaddr = io_mapping_map_atomic_wc(mapping, base);
538         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
539                                                       user_data, length);
540         io_mapping_unmap_atomic(vaddr);
541         if (unwritten) {
542                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
543                 unwritten = copy_from_user((void __force *)vaddr + offset,
544                                            user_data, length);
545                 io_mapping_unmap(vaddr);
546         }
547
548         return unwritten;
549 }
550
551 /**
552  * This is the fast pwrite path, where we copy the data directly from the
553  * user into the GTT, uncached.
554  * @obj: i915 GEM object
555  * @args: pwrite arguments structure
556  */
557 static int
558 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
559                          const struct drm_i915_gem_pwrite *args)
560 {
561         struct drm_i915_private *i915 = to_i915(obj->base.dev);
562         struct i915_ggtt *ggtt = &i915->ggtt;
563         intel_wakeref_t wakeref;
564         struct drm_mm_node node;
565         struct dma_fence *fence;
566         struct i915_vma *vma;
567         u64 remain, offset;
568         void __user *user_data;
569         int ret;
570
571         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
572         if (ret)
573                 return ret;
574
575         if (i915_gem_object_has_struct_page(obj)) {
576                 /*
577                  * Avoid waking the device up if we can fall back, as
578                  * waking/resuming is very slow (worst-case 10-100 ms
579                  * depending on PCI sleeps and our own resume time).
580                  * This easily dwarfs any performance advantage from
581                  * using the cache bypass of indirect GGTT access.
582                  */
583                 wakeref = intel_runtime_pm_get_if_in_use(i915);
584                 if (!wakeref) {
585                         ret = -EFAULT;
586                         goto out_unlock;
587                 }
588         } else {
589                 /* No backing pages, no fallback, we must force GGTT access */
590                 wakeref = intel_runtime_pm_get(i915);
591         }
592
593         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
594                                        PIN_MAPPABLE |
595                                        PIN_NONFAULT |
596                                        PIN_NONBLOCK);
597         if (!IS_ERR(vma)) {
598                 node.start = i915_ggtt_offset(vma);
599                 node.allocated = false;
600                 ret = i915_vma_put_fence(vma);
601                 if (ret) {
602                         i915_vma_unpin(vma);
603                         vma = ERR_PTR(ret);
604                 }
605         }
606         if (IS_ERR(vma)) {
607                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
608                 if (ret)
609                         goto out_rpm;
610                 GEM_BUG_ON(!node.allocated);
611         }
612
613         mutex_unlock(&i915->drm.struct_mutex);
614
615         ret = i915_gem_object_lock_interruptible(obj);
616         if (ret)
617                 goto out_unpin;
618
619         ret = i915_gem_object_set_to_gtt_domain(obj, true);
620         if (ret) {
621                 i915_gem_object_unlock(obj);
622                 goto out_unpin;
623         }
624
625         fence = i915_gem_object_lock_fence(obj);
626         i915_gem_object_unlock(obj);
627         if (!fence) {
628                 ret = -ENOMEM;
629                 goto out_unpin;
630         }
631
632         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
633
634         user_data = u64_to_user_ptr(args->data_ptr);
635         offset = args->offset;
636         remain = args->size;
637         while (remain) {
638                 /* Operation in this page
639                  *
640                  * page_base = page offset within aperture
641                  * page_offset = offset within page
642                  * page_length = bytes to copy for this page
643                  */
644                 u32 page_base = node.start;
645                 unsigned int page_offset = offset_in_page(offset);
646                 unsigned int page_length = PAGE_SIZE - page_offset;
647                 page_length = remain < page_length ? remain : page_length;
648                 if (node.allocated) {
649                         wmb(); /* flush the write before we modify the GGTT */
650                         ggtt->vm.insert_page(&ggtt->vm,
651                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
652                                              node.start, I915_CACHE_NONE, 0);
653                         wmb(); /* flush modifications to the GGTT (insert_page) */
654                 } else {
655                         page_base += offset & PAGE_MASK;
656                 }
657                 /* If we get a fault while copying data, then (presumably) our
658                  * source page isn't available.  Return the error and we'll
659                  * retry in the slow path.
660                  * If the object is non-shmem backed, we retry with the
661                  * path that handles page faults.
662                  */
663                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
664                                user_data, page_length)) {
665                         ret = -EFAULT;
666                         break;
667                 }
668
669                 remain -= page_length;
670                 user_data += page_length;
671                 offset += page_length;
672         }
673         intel_fb_obj_flush(obj, ORIGIN_CPU);
674
675         i915_gem_object_unlock_fence(obj, fence);
676 out_unpin:
677         mutex_lock(&i915->drm.struct_mutex);
678         if (node.allocated) {
679                 wmb();
680                 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
681                 remove_mappable_node(&node);
682         } else {
683                 i915_vma_unpin(vma);
684         }
685 out_rpm:
686         intel_runtime_pm_put(i915, wakeref);
687 out_unlock:
688         mutex_unlock(&i915->drm.struct_mutex);
689         return ret;
690 }
691
692 /* Per-page copy function for the shmem pwrite fastpath.
693  * Flushes invalid cachelines before writing to the target if
694  * needs_clflush_before is set and flushes out any written cachelines after
695  * writing if needs_clflush is set.
696  */
697 static int
698 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
699              bool needs_clflush_before,
700              bool needs_clflush_after)
701 {
702         char *vaddr;
703         int ret;
704
705         vaddr = kmap(page);
706
707         if (needs_clflush_before)
708                 drm_clflush_virt_range(vaddr + offset, len);
709
710         ret = __copy_from_user(vaddr + offset, user_data, len);
711         if (!ret && needs_clflush_after)
712                 drm_clflush_virt_range(vaddr + offset, len);
713
714         kunmap(page);
715
716         return ret ? -EFAULT : 0;
717 }
718
719 static int
720 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
721                       const struct drm_i915_gem_pwrite *args)
722 {
723         unsigned int partial_cacheline_write;
724         unsigned int needs_clflush;
725         unsigned int offset, idx;
726         struct dma_fence *fence;
727         void __user *user_data;
728         u64 remain;
729         int ret;
730
731         ret = i915_gem_object_prepare_write(obj, &needs_clflush);
732         if (ret)
733                 return ret;
734
735         fence = i915_gem_object_lock_fence(obj);
736         i915_gem_object_finish_access(obj);
737         if (!fence)
738                 return -ENOMEM;
739
740         /* If we don't overwrite a cacheline completely we need to be
741          * careful to have up-to-date data by first clflushing. Don't
742          * overcomplicate things and flush the entire patch.
743          */
744         partial_cacheline_write = 0;
745         if (needs_clflush & CLFLUSH_BEFORE)
746                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
747
748         user_data = u64_to_user_ptr(args->data_ptr);
749         remain = args->size;
750         offset = offset_in_page(args->offset);
751         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
752                 struct page *page = i915_gem_object_get_page(obj, idx);
753                 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
754
755                 ret = shmem_pwrite(page, offset, length, user_data,
756                                    (offset | length) & partial_cacheline_write,
757                                    needs_clflush & CLFLUSH_AFTER);
758                 if (ret)
759                         break;
760
761                 remain -= length;
762                 user_data += length;
763                 offset = 0;
764         }
765
766         intel_fb_obj_flush(obj, ORIGIN_CPU);
767         i915_gem_object_unlock_fence(obj, fence);
768
769         return ret;
770 }
771
772 /**
773  * Writes data to the object referenced by handle.
774  * @dev: drm device
775  * @data: ioctl data blob
776  * @file: drm file
777  *
778  * On error, the contents of the buffer that were to be modified are undefined.
779  */
780 int
781 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
782                       struct drm_file *file)
783 {
784         struct drm_i915_gem_pwrite *args = data;
785         struct drm_i915_gem_object *obj;
786         int ret;
787
788         if (args->size == 0)
789                 return 0;
790
791         if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
792                 return -EFAULT;
793
794         obj = i915_gem_object_lookup(file, args->handle);
795         if (!obj)
796                 return -ENOENT;
797
798         /* Bounds check destination. */
799         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
800                 ret = -EINVAL;
801                 goto err;
802         }
803
804         /* Writes not allowed into this read-only object */
805         if (i915_gem_object_is_readonly(obj)) {
806                 ret = -EINVAL;
807                 goto err;
808         }
809
810         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
811
812         ret = -ENODEV;
813         if (obj->ops->pwrite)
814                 ret = obj->ops->pwrite(obj, args);
815         if (ret != -ENODEV)
816                 goto err;
817
818         ret = i915_gem_object_wait(obj,
819                                    I915_WAIT_INTERRUPTIBLE |
820                                    I915_WAIT_ALL,
821                                    MAX_SCHEDULE_TIMEOUT);
822         if (ret)
823                 goto err;
824
825         ret = i915_gem_object_pin_pages(obj);
826         if (ret)
827                 goto err;
828
829         ret = -EFAULT;
830         /* We can only do the GTT pwrite on untiled buffers, as otherwise
831          * it would end up going through the fenced access, and we'll get
832          * different detiling behavior between reading and writing.
833          * pread/pwrite currently are reading and writing from the CPU
834          * perspective, requiring manual detiling by the client.
835          */
836         if (!i915_gem_object_has_struct_page(obj) ||
837             cpu_write_needs_clflush(obj))
838                 /* Note that the gtt paths might fail with non-page-backed user
839                  * pointers (e.g. gtt mappings when moving data between
840                  * textures). Fall back to the shmem path in that case.
841                  */
842                 ret = i915_gem_gtt_pwrite_fast(obj, args);
843
844         if (ret == -EFAULT || ret == -ENOSPC) {
845                 if (obj->phys_handle)
846                         ret = i915_gem_phys_pwrite(obj, args, file);
847                 else
848                         ret = i915_gem_shmem_pwrite(obj, args);
849         }
850
851         i915_gem_object_unpin_pages(obj);
852 err:
853         i915_gem_object_put(obj);
854         return ret;
855 }
856
857 /**
858  * Called when user space has done writes to this buffer
859  * @dev: drm device
860  * @data: ioctl data blob
861  * @file: drm file
862  */
863 int
864 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
865                          struct drm_file *file)
866 {
867         struct drm_i915_gem_sw_finish *args = data;
868         struct drm_i915_gem_object *obj;
869
870         obj = i915_gem_object_lookup(file, args->handle);
871         if (!obj)
872                 return -ENOENT;
873
874         /*
875          * Proxy objects are barred from CPU access, so there is no
876          * need to ban sw_finish as it is a nop.
877          */
878
879         /* Pinned buffers may be scanout, so flush the cache */
880         i915_gem_object_flush_if_display(obj);
881         i915_gem_object_put(obj);
882
883         return 0;
884 }
885
886 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
887 {
888         struct drm_i915_gem_object *obj, *on;
889         int i;
890
891         /*
892          * Only called during RPM suspend. All users of the userfault_list
893          * must be holding an RPM wakeref to ensure that this can not
894          * run concurrently with themselves (and use the struct_mutex for
895          * protection between themselves).
896          */
897
898         list_for_each_entry_safe(obj, on,
899                                  &dev_priv->mm.userfault_list, userfault_link)
900                 __i915_gem_object_release_mmap(obj);
901
902         /* The fences will be lost when the device powers down. If any were
903          * in use by hardware (i.e. they are pinned), we should not be powering
904          * down! All other fences will be reacquired by the user upon waking.
905          */
906         for (i = 0; i < dev_priv->num_fence_regs; i++) {
907                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
908
909                 /* Ideally we want to assert that the fence register is not
910                  * live at this point (i.e. that no piece of code will be
911                  * trying to write through fence + GTT, as that not only violates
912                  * our tracking of activity and associated locking/barriers,
913                  * but is also illegal given that the hw is powered down).
914                  *
915                  * Previously we used reg->pin_count as a "liveness" indicator.
916                  * That is not sufficient, and we need a more fine-grained
917                  * tool if we want to have a sanity check here.
918                  */
919
920                 if (!reg->vma)
921                         continue;
922
923                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
924                 reg->dirty = true;
925         }
926 }
927
928 static int wait_for_engines(struct drm_i915_private *i915)
929 {
930         if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
931                 dev_err(i915->drm.dev,
932                         "Failed to idle engines, declaring wedged!\n");
933                 GEM_TRACE_DUMP();
934                 i915_gem_set_wedged(i915);
935                 return -EIO;
936         }
937
938         return 0;
939 }
940
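/* Wait upon the last request of every active timeline, dropping the
 * timeline mutex around each wait and restarting the walk from the
 * beginning of the list after reacquiring it.
 */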
941 static long
942 wait_for_timelines(struct drm_i915_private *i915,
943                    unsigned int flags, long timeout)
944 {
945         struct i915_gt_timelines *gt = &i915->gt.timelines;
946         struct i915_timeline *tl;
947
948         mutex_lock(&gt->mutex);
949         list_for_each_entry(tl, &gt->active_list, link) {
950                 struct i915_request *rq;
951
952                 rq = i915_active_request_get_unlocked(&tl->last_request);
953                 if (!rq)
954                         continue;
955
956                 mutex_unlock(&gt->mutex);
957
958                 /*
959                  * "Race-to-idle".
960                  *
961                  * Switching to the kernel context is often used as a synchronous
962                  * step prior to idling, e.g. in suspend for flushing all
963                  * current operations to memory before sleeping. These we
964                  * want to complete as quickly as possible to avoid prolonged
965                  * stalls, so allow the gpu to boost to maximum clocks.
966                  */
967                 if (flags & I915_WAIT_FOR_IDLE_BOOST)
968                         gen6_rps_boost(rq);
969
970                 timeout = i915_request_wait(rq, flags, timeout);
971                 i915_request_put(rq);
972                 if (timeout < 0)
973                         return timeout;
974
975                 /* restart after reacquiring the lock */
976                 mutex_lock(&gt->mutex);
977                 tl = list_entry(&gt->active_list, typeof(*tl), link);
978         }
979         mutex_unlock(&gt->mutex);
980
981         return timeout;
982 }
983
984 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
985                            unsigned int flags, long timeout)
986 {
987         GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
988                   flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
989                   timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
990                   yesno(i915->gt.awake));
991
992         /* If the device is asleep, we have no requests outstanding */
993         if (!READ_ONCE(i915->gt.awake))
994                 return 0;
995
996         timeout = wait_for_timelines(i915, flags, timeout);
997         if (timeout < 0)
998                 return timeout;
999
1000         if (flags & I915_WAIT_LOCKED) {
1001                 int err;
1002
1003                 lockdep_assert_held(&i915->drm.struct_mutex);
1004
1005                 err = wait_for_engines(i915);
1006                 if (err)
1007                         return err;
1008
1009                 i915_retire_requests(i915);
1010         }
1011
1012         return 0;
1013 }
1014
1015 struct i915_vma *
1016 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1017                          const struct i915_ggtt_view *view,
1018                          u64 size,
1019                          u64 alignment,
1020                          u64 flags)
1021 {
1022         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1023         struct i915_address_space *vm = &dev_priv->ggtt.vm;
1024         struct i915_vma *vma;
1025         int ret;
1026
1027         lockdep_assert_held(&obj->base.dev->struct_mutex);
1028
1029         if (flags & PIN_MAPPABLE &&
1030             (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
1031                 /* If the required space is larger than the available
1032                  * aperture, we will not be able to find a slot for the
1033                  * object and unbinding the object now will be in
1034                  * vain. Worse, doing so may cause us to ping-pong
1035                  * the object in and out of the Global GTT and
1036                  * waste a lot of cycles under the mutex.
1037                  */
1038                 if (obj->base.size > dev_priv->ggtt.mappable_end)
1039                         return ERR_PTR(-E2BIG);
1040
1041                 /* If NONBLOCK is set the caller is optimistically
1042                  * trying to cache the full object within the mappable
1043                  * aperture, and *must* have a fallback in place for
1044                  * situations where we cannot bind the object. We
1045                  * can be a little more lax here and use the fallback
1046                  * more often to avoid costly migrations of ourselves
1047                  * and other objects within the aperture.
1048                  *
1049                  * Half-the-aperture is used as a simple heuristic.
1050                  * More interesting would be to search for a free
1051                  * block prior to making the commitment to unbind.
1052                  * That caters for the self-harm case, and with a
1053                  * little more heuristics (e.g. NOFAULT, NOEVICT)
1054                  * we could try to minimise harm to others.
1055                  */
1056                 if (flags & PIN_NONBLOCK &&
1057                     obj->base.size > dev_priv->ggtt.mappable_end / 2)
1058                         return ERR_PTR(-ENOSPC);
1059         }
1060
1061         vma = i915_vma_instance(obj, vm, view);
1062         if (IS_ERR(vma))
1063                 return vma;
1064
1065         if (i915_vma_misplaced(vma, size, alignment, flags)) {
1066                 if (flags & PIN_NONBLOCK) {
1067                         if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
1068                                 return ERR_PTR(-ENOSPC);
1069
1070                         if (flags & PIN_MAPPABLE &&
1071                             vma->fence_size > dev_priv->ggtt.mappable_end / 2)
1072                                 return ERR_PTR(-ENOSPC);
1073                 }
1074
1075                 WARN(i915_vma_is_pinned(vma),
1076                      "bo is already pinned in ggtt with incorrect alignment:"
1077                      " offset=%08x, req.alignment=%llx,"
1078                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
1079                      i915_ggtt_offset(vma), alignment,
1080                      !!(flags & PIN_MAPPABLE),
1081                      i915_vma_is_map_and_fenceable(vma));
1082                 ret = i915_vma_unbind(vma);
1083                 if (ret)
1084                         return ERR_PTR(ret);
1085         }
1086
1087         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
1088         if (ret)
1089                 return ERR_PTR(ret);
1090
1091         return vma;
1092 }
1093
1094 int
1095 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1096                        struct drm_file *file_priv)
1097 {
1098         struct drm_i915_private *i915 = to_i915(dev);
1099         struct drm_i915_gem_madvise *args = data;
1100         struct drm_i915_gem_object *obj;
1101         int err;
1102
1103         switch (args->madv) {
1104         case I915_MADV_DONTNEED:
1105         case I915_MADV_WILLNEED:
1106             break;
1107         default:
1108             return -EINVAL;
1109         }
1110
1111         obj = i915_gem_object_lookup(file_priv, args->handle);
1112         if (!obj)
1113                 return -ENOENT;
1114
1115         err = mutex_lock_interruptible(&obj->mm.lock);
1116         if (err)
1117                 goto out;
1118
1119         if (i915_gem_object_has_pages(obj) &&
1120             i915_gem_object_is_tiled(obj) &&
1121             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
1122                 if (obj->mm.madv == I915_MADV_WILLNEED) {
1123                         GEM_BUG_ON(!obj->mm.quirked);
1124                         __i915_gem_object_unpin_pages(obj);
1125                         obj->mm.quirked = false;
1126                 }
1127                 if (args->madv == I915_MADV_WILLNEED) {
1128                         GEM_BUG_ON(obj->mm.quirked);
1129                         __i915_gem_object_pin_pages(obj);
1130                         obj->mm.quirked = true;
1131                 }
1132         }
1133
1134         if (obj->mm.madv != __I915_MADV_PURGED)
1135                 obj->mm.madv = args->madv;
1136
1137         if (i915_gem_object_has_pages(obj)) {
1138                 struct list_head *list;
1139
1140                 if (i915_gem_object_is_shrinkable(obj)) {
1141                         unsigned long flags;
1142
1143                         spin_lock_irqsave(&i915->mm.obj_lock, flags);
1144
1145                         if (obj->mm.madv != I915_MADV_WILLNEED)
1146                                 list = &i915->mm.purge_list;
1147                         else if (obj->bind_count)
1148                                 list = &i915->mm.bound_list;
1149                         else
1150                                 list = &i915->mm.unbound_list;
1151                         list_move_tail(&obj->mm.link, list);
1152
1153                         spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
1154                 }
1155         }
1156
1157         /* if the object is no longer attached, discard its backing storage */
1158         if (obj->mm.madv == I915_MADV_DONTNEED &&
1159             !i915_gem_object_has_pages(obj))
1160                 i915_gem_object_truncate(obj);
1161
1162         args->retained = obj->mm.madv != __I915_MADV_PURGED;
1163         mutex_unlock(&obj->mm.lock);
1164
1165 out:
1166         i915_gem_object_put(obj);
1167         return err;
1168 }
1169
1170 void i915_gem_sanitize(struct drm_i915_private *i915)
1171 {
1172         intel_wakeref_t wakeref;
1173
1174         GEM_TRACE("\n");
1175
1176         wakeref = intel_runtime_pm_get(i915);
1177         intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
1178
1179         /*
1180          * As we have just resumed the machine and woken the device up from
1181          * deep PCI sleep (presumably D3_cold), assume the HW has been reset
1182          * back to defaults, recovering from whatever wedged state we left it
1183          * in and so worth trying to use the device once more.
1184          */
1185         if (i915_terminally_wedged(i915))
1186                 i915_gem_unset_wedged(i915);
1187
1188         /*
1189          * If we inherit context state from the BIOS or earlier occupants
1190          * of the GPU, the GPU may be in an inconsistent state when we
1191          * try to take over. The only way to remove the earlier state
1192          * is by resetting. However, resetting on earlier gen is tricky as
1193          * it may impact the display and we are uncertain about the stability
1194          * of the reset, so this could be applied to even earlier gen.
1195          */
1196         intel_gt_sanitize(i915, false);
1197
1198         intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
1199         intel_runtime_pm_put(i915, wakeref);
1200
1201         mutex_lock(&i915->drm.struct_mutex);
1202         i915_gem_contexts_lost(i915);
1203         mutex_unlock(&i915->drm.struct_mutex);
1204 }
1205
1206 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
1207 {
1208         if (INTEL_GEN(dev_priv) < 5 ||
1209             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
1210                 return;
1211
1212         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
1213                                  DISP_TILE_SURFACE_SWIZZLING);
1214
1215         if (IS_GEN(dev_priv, 5))
1216                 return;
1217
1218         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
1219         if (IS_GEN(dev_priv, 6))
1220                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
1221         else if (IS_GEN(dev_priv, 7))
1222                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
1223         else if (IS_GEN(dev_priv, 8))
1224                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
1225         else
1226                 BUG();
1227 }
1228
1229 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
1230 {
1231         I915_WRITE(RING_CTL(base), 0);
1232         I915_WRITE(RING_HEAD(base), 0);
1233         I915_WRITE(RING_TAIL(base), 0);
1234         I915_WRITE(RING_START(base), 0);
1235 }
1236
1237 static void init_unused_rings(struct drm_i915_private *dev_priv)
1238 {
1239         if (IS_I830(dev_priv)) {
1240                 init_unused_ring(dev_priv, PRB1_BASE);
1241                 init_unused_ring(dev_priv, SRB0_BASE);
1242                 init_unused_ring(dev_priv, SRB1_BASE);
1243                 init_unused_ring(dev_priv, SRB2_BASE);
1244                 init_unused_ring(dev_priv, SRB3_BASE);
1245         } else if (IS_GEN(dev_priv, 2)) {
1246                 init_unused_ring(dev_priv, SRB0_BASE);
1247                 init_unused_ring(dev_priv, SRB1_BASE);
1248         } else if (IS_GEN(dev_priv, 3)) {
1249                 init_unused_ring(dev_priv, PRB1_BASE);
1250                 init_unused_ring(dev_priv, PRB2_BASE);
1251         }
1252 }
1253
1254 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
1255 {
1256         int ret;
1257
1258         dev_priv->gt.last_init_time = ktime_get();
1259
1260         /* Double layer security blanket, see i915_gem_init() */
1261         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1262
1263         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
1264                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
1265
1266         if (IS_HASWELL(dev_priv))
1267                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
1268                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
1269
1270         /* Apply the GT workarounds... */
1271         intel_gt_apply_workarounds(dev_priv);
1272         /* ...and determine whether they are sticking. */
1273         intel_gt_verify_workarounds(dev_priv, "init");
1274
1275         i915_gem_init_swizzling(dev_priv);
1276
1277         /*
1278          * At least 830 can leave some of the unused rings
1279          * "active" (i.e. head != tail) after resume which
1280          * will prevent c3 entry. Make sure all unused rings
1281          * are totally idle.
1282          */
1283         init_unused_rings(dev_priv);
1284
1285         BUG_ON(!dev_priv->kernel_context);
1286         ret = i915_terminally_wedged(dev_priv);
1287         if (ret)
1288                 goto out;
1289
1290         ret = i915_ppgtt_init_hw(dev_priv);
1291         if (ret) {
1292                 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
1293                 goto out;
1294         }
1295
1296         ret = intel_wopcm_init_hw(&dev_priv->wopcm);
1297         if (ret) {
1298                 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
1299                 goto out;
1300         }
1301
1302         /* We can't enable contexts until all firmware is loaded */
1303         ret = intel_uc_init_hw(dev_priv);
1304         if (ret) {
1305                 DRM_ERROR("Enabling uc failed (%d)\n", ret);
1306                 goto out;
1307         }
1308
1309         intel_mocs_init_l3cc_table(dev_priv);
1310
1311         /* Only when the HW is re-initialised, can we replay the requests */
1312         ret = intel_engines_resume(dev_priv);
1313         if (ret)
1314                 goto cleanup_uc;
1315
1316         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1317
1318         intel_engines_set_scheduler_caps(dev_priv);
1319         return 0;
1320
1321 cleanup_uc:
1322         intel_uc_fini_hw(dev_priv);
1323 out:
1324         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1325
1326         return ret;
1327 }
1328
1329 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
1330 {
1331         struct intel_engine_cs *engine;
1332         struct i915_gem_context *ctx;
1333         struct i915_gem_engines *e;
1334         enum intel_engine_id id;
1335         int err = 0;
1336
1337         /*
1338          * As we reset the gpu during very early sanitisation, the current
1339          * register state on the GPU should reflect its default values.
1340          * We load a context onto the hw (with restore-inhibit), then switch
1341          * over to a second context to save that default register state. We
1342          * can then prime every new context with that state so they all start
1343          * from the same default HW values.
1344          */
1345
1346         ctx = i915_gem_context_create_kernel(i915, 0);
1347         if (IS_ERR(ctx))
1348                 return PTR_ERR(ctx);
1349
1350         e = i915_gem_context_lock_engines(ctx);
1351
1352         for_each_engine(engine, i915, id) {
1353                 struct intel_context *ce = e->engines[id];
1354                 struct i915_request *rq;
1355
1356                 rq = intel_context_create_request(ce);
1357                 if (IS_ERR(rq)) {
1358                         err = PTR_ERR(rq);
1359                         goto err_active;
1360                 }
1361
1362                 err = 0;
1363                 if (rq->engine->init_context)
1364                         err = rq->engine->init_context(rq);
1365
1366                 i915_request_add(rq);
1367                 if (err)
1368                         goto err_active;
1369         }
1370
1371         /* Flush the default context image to memory, and enable powersaving. */
1372         if (!i915_gem_load_power_context(i915)) {
1373                 err = -EIO;
1374                 goto err_active;
1375         }
1376
1377         for_each_engine(engine, i915, id) {
1378                 struct intel_context *ce = e->engines[id];
1379                 struct i915_vma *state = ce->state;
1380                 void *vaddr;
1381
1382                 if (!state)
1383                         continue;
1384
1385                 GEM_BUG_ON(intel_context_is_pinned(ce));
1386
1387                 /*
1388                  * As we will hold a reference to the logical state, it will
1389                  * not be torn down with the context, and importantly the
1390                  * object will hold onto its vma (making it possible for a
1391                  * stray GTT write to corrupt our defaults). Unmap the vma
1392                  * from the GTT to prevent such accidents and reclaim the
1393                  * space.
1394                  */
1395                 err = i915_vma_unbind(state);
1396                 if (err)
1397                         goto err_active;
1398
1399                 i915_gem_object_lock(state->obj);
1400                 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
1401                 i915_gem_object_unlock(state->obj);
1402                 if (err)
1403                         goto err_active;
1404
1405                 engine->default_state = i915_gem_object_get(state->obj);
1406                 i915_gem_object_set_cache_coherency(engine->default_state,
1407                                                     I915_CACHE_LLC);
1408
1409                 /* Check we can acquire the image of the context state */
1410                 vaddr = i915_gem_object_pin_map(engine->default_state,
1411                                                 I915_MAP_FORCE_WB);
1412                 if (IS_ERR(vaddr)) {
1413                         err = PTR_ERR(vaddr);
1414                         goto err_active;
1415                 }
1416
1417                 i915_gem_object_unpin_map(engine->default_state);
1418         }
1419
1420         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1421                 unsigned int found = intel_engines_has_context_isolation(i915);
1422
1423                 /*
1424                  * Make sure that classes with multiple engine instances all
1425                  * share the same basic configuration.
1426                  */
1427                 for_each_engine(engine, i915, id) {
1428                         unsigned int bit = BIT(engine->uabi_class);
1429                         unsigned int expected = engine->default_state ? bit : 0;
1430
1431                         if ((found & bit) != expected) {
1432                                 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
1433                                           engine->uabi_class, engine->name);
1434                         }
1435                 }
1436         }
1437
1438 out_ctx:
1439         i915_gem_context_unlock_engines(ctx);
1440         i915_gem_context_set_closed(ctx);
1441         i915_gem_context_put(ctx);
1442         return err;
1443
1444 err_active:
1445         /*
1446          * If we have to abandon now, we expect the engines to be idle
1447          * and ready to be torn-down. The quickest way we can accomplish
1448          * this is by declaring ourselves wedged.
1449          */
1450         i915_gem_set_wedged(i915);
1451         goto out_ctx;
1452 }
1453
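/* Allocate the global GT scratch buffer, preferring stolen memory and
 * falling back to internal pages, and pin it high in the GGTT.
 */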
1454 static int
1455 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
1456 {
1457         struct drm_i915_gem_object *obj;
1458         struct i915_vma *vma;
1459         int ret;
1460
1461         obj = i915_gem_object_create_stolen(i915, size);
1462         if (!obj)
1463                 obj = i915_gem_object_create_internal(i915, size);
1464         if (IS_ERR(obj)) {
1465                 DRM_ERROR("Failed to allocate scratch page\n");
1466                 return PTR_ERR(obj);
1467         }
1468
1469         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1470         if (IS_ERR(vma)) {
1471                 ret = PTR_ERR(vma);
1472                 goto err_unref;
1473         }
1474
1475         ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1476         if (ret)
1477                 goto err_unref;
1478
1479         i915->gt.scratch = vma;
1480         return 0;
1481
1482 err_unref:
1483         i915_gem_object_put(obj);
1484         return ret;
1485 }
1486
1487 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
1488 {
1489         i915_vma_unpin_and_release(&i915->gt.scratch, 0);
1490 }
1491
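/* Under CONFIG_DRM_I915_DEBUG_GEM, check that the per-engine workarounds
 * are still applied after load, returning -EIO if verification fails.
 */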
1492 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
1493 {
1494         struct intel_engine_cs *engine;
1495         enum intel_engine_id id;
1496         int err = 0;
1497
1498         if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
1499                 return 0;
1500
1501         for_each_engine(engine, i915, id) {
1502                 if (intel_engine_verify_workarounds(engine, "load"))
1503                         err = -EIO;
1504         }
1505
1506         return err;
1507 }
1508
1509 int i915_gem_init(struct drm_i915_private *dev_priv)
1510 {
1511         int ret;
1512
1513         /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
1514         if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
1515                 mkwrite_device_info(dev_priv)->page_sizes =
1516                         I915_GTT_PAGE_SIZE_4K;
1517
1518         dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
1519
1520         i915_timelines_init(dev_priv);
1521
1522         ret = i915_gem_init_userptr(dev_priv);
1523         if (ret)
1524                 return ret;
1525
1526         ret = intel_uc_init_misc(dev_priv);
1527         if (ret)
1528                 return ret;
1529
1530         ret = intel_wopcm_init(&dev_priv->wopcm);
1531         if (ret)
1532                 goto err_uc_misc;
1533
1534         /* This is just a security blanket to placate dragons.
1535          * On some systems, we very sporadically observe that the first TLBs
1536          * used by the CS may be stale, despite us poking the TLB reset. If
1537          * we hold the forcewake during initialisation these problems
1538          * just magically go away.
1539          */
1540         mutex_lock(&dev_priv->drm.struct_mutex);
1541         intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
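        /*
         * The matching forcewake_put is issued on both the success path and
         * the error unwind below -- every failure after this point converges
         * on err_unlock.
         */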
1542
1543         ret = i915_gem_init_ggtt(dev_priv);
1544         if (ret) {
1545                 GEM_BUG_ON(ret == -EIO);
1546                 goto err_unlock;
1547         }
1548
1549         ret = i915_gem_init_scratch(dev_priv,
1550                                     IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
1551         if (ret) {
1552                 GEM_BUG_ON(ret == -EIO);
1553                 goto err_ggtt;
1554         }
1555
1556         ret = intel_engines_setup(dev_priv);
1557         if (ret) {
1558                 GEM_BUG_ON(ret == -EIO);
1559                 goto err_scratch;
1560         }
1561
1562         ret = i915_gem_contexts_init(dev_priv);
1563         if (ret) {
1564                 GEM_BUG_ON(ret == -EIO);
1565                 goto err_scratch;
1566         }
1567
1568         ret = intel_engines_init(dev_priv);
1569         if (ret) {
1570                 GEM_BUG_ON(ret == -EIO);
1571                 goto err_context;
1572         }
1573
1574         intel_init_gt_powersave(dev_priv);
1575
1576         ret = intel_uc_init(dev_priv);
1577         if (ret)
1578                 goto err_pm;
1579
1580         ret = i915_gem_init_hw(dev_priv);
1581         if (ret)
1582                 goto err_uc_init;
1583
1584         /*
1585          * Despite its name, intel_init_clock_gating applies not only display
1586          * clock gating workarounds but also GT mmio workarounds and the
1587          * occasional GT power context workaround. Worse, sometimes it
1588          * includes a context register workaround which we need to apply
1589          * before we record the default HW state for all contexts.
1590          *
1591          * FIXME: break up the workarounds and apply them at the right time!
1592          */
1593         intel_init_clock_gating(dev_priv);
1594
1595         ret = intel_engines_verify_workarounds(dev_priv);
1596         if (ret)
1597                 goto err_init_hw;
1598
1599         ret = __intel_engines_record_defaults(dev_priv);
1600         if (ret)
1601                 goto err_init_hw;
1602
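        /*
         * Fault injection via i915_inject_load_failure(): exercise both
         * flavours of the error unwind -- a plain failure (-ENODEV) that
         * tears everything down, and the special -EIO case that wedges the
         * GPU but keeps KMS alive.
         */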
1603         if (i915_inject_load_failure()) {
1604                 ret = -ENODEV;
1605                 goto err_init_hw;
1606         }
1607
1608         if (i915_inject_load_failure()) {
1609                 ret = -EIO;
1610                 goto err_init_hw;
1611         }
1612
1613         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1614         mutex_unlock(&dev_priv->drm.struct_mutex);
1615
1616         return 0;
1617
1618         /*
1619          * Unwinding is complicated by the fact that we want -EIO to mean
1620          * "disable GPU submission but keep KMS alive". We want to mark the
1621          * HW as irreversibly wedged, but keep enough state around that the
1622          * driver doesn't explode during runtime.
1623          */
1624 err_init_hw:
1625         mutex_unlock(&dev_priv->drm.struct_mutex);
1626
1627         i915_gem_set_wedged(dev_priv);
1628         i915_gem_suspend(dev_priv);
1629         i915_gem_suspend_late(dev_priv);
1630
1631         i915_gem_drain_workqueue(dev_priv);
1632
1633         mutex_lock(&dev_priv->drm.struct_mutex);
1634         intel_uc_fini_hw(dev_priv);
1635 err_uc_init:
1636         intel_uc_fini(dev_priv);
1637 err_pm:
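        /*
         * For -EIO we deliberately skip freeing the engines, contexts and
         * powersave state (here and under err_context below): the driver
         * keeps running wedged and still needs them, and i915_gem_fini()
         * releases them at unload.
         */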
1638         if (ret != -EIO) {
1639                 intel_cleanup_gt_powersave(dev_priv);
1640                 intel_engines_cleanup(dev_priv);
1641         }
1642 err_context:
1643         if (ret != -EIO)
1644                 i915_gem_contexts_fini(dev_priv);
1645 err_scratch:
1646         i915_gem_fini_scratch(dev_priv);
1647 err_ggtt:
1648 err_unlock:
1649         intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1650         mutex_unlock(&dev_priv->drm.struct_mutex);
1651
1652 err_uc_misc:
1653         intel_uc_fini_misc(dev_priv);
1654
1655         if (ret != -EIO) {
1656                 i915_gem_cleanup_userptr(dev_priv);
1657                 i915_timelines_fini(dev_priv);
1658         }
1659
1660         if (ret == -EIO) {
1661                 mutex_lock(&dev_priv->drm.struct_mutex);
1662
1663                 /*
1664                  * Allow engine initialisation to fail by marking the GPU as
1665                  * wedged. But we only want to do this where the GPU is angry,
1666                  * for all other failure, such as an allocation failure, bail.
1667                  * wedged. But we only want to do this where the GPU is angry;
1668                  * for all other failures, such as an allocation failure, we bail.
1669                         i915_load_error(dev_priv,
1670                                         "Failed to initialize GPU, declaring it wedged!\n");
1671                         i915_gem_set_wedged(dev_priv);
1672                 }
1673
1674                 /* Minimal basic recovery for KMS */
1675                 ret = i915_ggtt_enable_hw(dev_priv);
1676                 i915_gem_restore_gtt_mappings(dev_priv);
1677                 i915_gem_restore_fences(dev_priv);
1678                 intel_init_clock_gating(dev_priv);
1679
1680                 mutex_unlock(&dev_priv->drm.struct_mutex);
1681         }
1682
1683         i915_gem_drain_freed_objects(dev_priv);
1684         return ret;
1685 }
1686
1687 void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
1688 {
1689         GEM_BUG_ON(dev_priv->gt.awake);
1690
1691         intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);
1692
1693         i915_gem_suspend_late(dev_priv);
1694         intel_disable_gt_powersave(dev_priv);
1695
1696         /* Flush any outstanding unpin_work. */
1697         i915_gem_drain_workqueue(dev_priv);
1698
1699         mutex_lock(&dev_priv->drm.struct_mutex);
1700         intel_uc_fini_hw(dev_priv);
1701         intel_uc_fini(dev_priv);
1702         mutex_unlock(&dev_priv->drm.struct_mutex);
1703
1704         i915_gem_drain_freed_objects(dev_priv);
1705 }
1706
1707 void i915_gem_fini(struct drm_i915_private *dev_priv)
1708 {
1709         mutex_lock(&dev_priv->drm.struct_mutex);
1710         intel_engines_cleanup(dev_priv);
1711         i915_gem_contexts_fini(dev_priv);
1712         i915_gem_fini_scratch(dev_priv);
1713         mutex_unlock(&dev_priv->drm.struct_mutex);
1714
1715         intel_wa_list_free(&dev_priv->gt_wa_list);
1716
1717         intel_cleanup_gt_powersave(dev_priv);
1718
1719         intel_uc_fini_misc(dev_priv);
1720         i915_gem_cleanup_userptr(dev_priv);
1721         i915_timelines_fini(dev_priv);
1722
1723         i915_gem_drain_freed_objects(dev_priv);
1724
1725         WARN_ON(!list_empty(&dev_priv->contexts.list));
1726 }
1727
1728 void i915_gem_init_mmio(struct drm_i915_private *i915)
1729 {
1730         i915_gem_sanitize(i915);
1731 }
1732
1733 void
1734 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
1735 {
1736         int i;
1737
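        /*
         * Pick the number of HW fence registers the platform provides:
         * 32 on gen7+ (except Valleyview/Cherryview, which get 16 like the
         * other gen4+ parts and the 945/G33/Pineview family), and 8 on
         * everything older. When running as a vGPU, the count reported by
         * the host overrides this.
         */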
1738         if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
1739             !IS_CHERRYVIEW(dev_priv))
1740                 dev_priv->num_fence_regs = 32;
1741         else if (INTEL_GEN(dev_priv) >= 4 ||
1742                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
1743                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
1744                 dev_priv->num_fence_regs = 16;
1745         else
1746                 dev_priv->num_fence_regs = 8;
1747
1748         if (intel_vgpu_active(dev_priv))
1749                 dev_priv->num_fence_regs =
1750                                 I915_READ(vgtif_reg(avail_rs.fence_num));
1751
1752         /* Initialize fence registers to zero */
1753         for (i = 0; i < dev_priv->num_fence_regs; i++) {
1754                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
1755
1756                 fence->i915 = dev_priv;
1757                 fence->id = i;
1758                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
1759         }
1760         i915_gem_restore_fences(dev_priv);
1761
1762         i915_gem_detect_bit_6_swizzle(dev_priv);
1763 }
1764
1765 static void i915_gem_init__mm(struct drm_i915_private *i915)
1766 {
1767         spin_lock_init(&i915->mm.obj_lock);
1768         spin_lock_init(&i915->mm.free_lock);
1769
1770         init_llist_head(&i915->mm.free_list);
1771
1772         INIT_LIST_HEAD(&i915->mm.purge_list);
1773         INIT_LIST_HEAD(&i915->mm.unbound_list);
1774         INIT_LIST_HEAD(&i915->mm.bound_list);
1775         INIT_LIST_HEAD(&i915->mm.fence_list);
1776
1777         INIT_LIST_HEAD(&i915->mm.userfault_list);
1778         intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915);
1779
1780         i915_gem_init__objects(i915);
1781 }
1782
1783 int i915_gem_init_early(struct drm_i915_private *dev_priv)
1784 {
1785         static struct lock_class_key reset_key;
1786         int err;
1787
1788         intel_gt_pm_init(dev_priv);
1789
1790         INIT_LIST_HEAD(&dev_priv->gt.active_rings);
1791         INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
1792         spin_lock_init(&dev_priv->gt.closed_lock);
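        /*
         * reset_lockmap is a lockdep-only annotation (there is no real lock
         * behind it) used to describe the ordering between GPU waits and GPU
         * resets so that lockdep can flag cycles between the two.
         */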
1793         lockdep_init_map(&dev_priv->gt.reset_lockmap,
1794                          "i915.reset", &reset_key, 0);
1795
1796         i915_gem_init__mm(dev_priv);
1797         i915_gem_init__pm(dev_priv);
1798
1799         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
1800         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
1801         mutex_init(&dev_priv->gpu_error.wedge_mutex);
1802         init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1803
1804         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
1805
1806         spin_lock_init(&dev_priv->fb_tracking.lock);
1807
1808         err = i915_gemfs_init(dev_priv);
1809         if (err)
1810                 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
1811
1812         return 0;
1813 }
1814
1815 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
1816 {
1817         i915_gem_drain_freed_objects(dev_priv);
1818         GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
1819         GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
1820         WARN_ON(dev_priv->mm.shrink_count);
1821
1822         cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
1823
1824         i915_gemfs_fini(dev_priv);
1825 }
1826
1827 int i915_gem_freeze(struct drm_i915_private *dev_priv)
1828 {
1829         /* Discard all purgeable objects, let userspace recover those as
1830          * required after resuming.
1831          */
1832         i915_gem_shrink_all(dev_priv);
1833
1834         return 0;
1835 }
1836
1837 int i915_gem_freeze_late(struct drm_i915_private *i915)
1838 {
1839         struct drm_i915_gem_object *obj;
1840         struct list_head *phases[] = {
1841                 &i915->mm.unbound_list,
1842                 &i915->mm.bound_list,
1843                 NULL
1844         }, **phase;
1845
1846         /*
1847          * Called just before we write the hibernation image.
1848          *
1849          * We need to update the domain tracking to reflect that the CPU
1850          * will be accessing all the pages to create and restore from the
1851          * hibernation, and so upon restoration those pages will be in the
1852          * CPU domain.
1853          *
1854          * To make sure the hibernation image contains the latest state,
1855          * we update that state just before writing out the image.
1856          *
1857          * To try to reduce the hibernation image, we manually shrink
1858          * the objects as well; see i915_gem_freeze().
1859          */
1860
1861         i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
1862         i915_gem_drain_freed_objects(i915);
1863
1864         for (phase = phases; *phase; phase++) {
1865                 list_for_each_entry(obj, *phase, mm.link) {
1866                         i915_gem_object_lock(obj);
1867                         WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1868                         i915_gem_object_unlock(obj);
1869                 }
1870         }
1871         GEM_BUG_ON(!list_empty(&i915->mm.purge_list));
1872
1873         return 0;
1874 }
1875
1876 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
1877 {
1878         struct drm_i915_file_private *file_priv = file->driver_priv;
1879         struct i915_request *request;
1880
1881         /* Clean up our request list when the client is going away, so that
1882          * later retire_requests won't dereference our soon-to-be-gone
1883          * file_priv.
1884          */
1885         spin_lock(&file_priv->mm.lock);
1886         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
1887                 request->file_priv = NULL;
1888         spin_unlock(&file_priv->mm.lock);
1889 }
1890
1891 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1892 {
1893         struct drm_i915_file_private *file_priv;
1894         int ret;
1895
1896         DRM_DEBUG("\n");
1897
1898         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1899         if (!file_priv)
1900                 return -ENOMEM;
1901
1902         file->driver_priv = file_priv;
1903         file_priv->dev_priv = i915;
1904         file_priv->file = file;
1905
1906         spin_lock_init(&file_priv->mm.lock);
1907         INIT_LIST_HEAD(&file_priv->mm.request_list);
1908
1909         file_priv->bsd_engine = -1;
1910         file_priv->hang_timestamp = jiffies;
1911
1912         ret = i915_gem_context_open(i915, file);
1913         if (ret)
1914                 kfree(file_priv);
1915
1916         return ret;
1917 }
1918
1919 /**
1920  * i915_gem_track_fb - update frontbuffer tracking
1921  * @old: current GEM buffer for the frontbuffer slots
1922  * @new: new GEM buffer for the frontbuffer slots
1923  * @frontbuffer_bits: bitmask of frontbuffer slots
1924  *
1925  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
1926  * from @old and setting them in @new. Both @old and @new can be NULL.
1927  */
1928 void i915_gem_track_fb(struct drm_i915_gem_object *old,
1929                        struct drm_i915_gem_object *new,
1930                        unsigned frontbuffer_bits)
1931 {
1932         /* Control of individual bits within the mask is guarded by
1933          * the owning plane->mutex, i.e. we can never see concurrent
1934          * manipulation of individual bits. But since the bitfield as a whole
1935          * is updated using RMW, we need to use atomics in order to update
1936          * the bits.
1937          */
1938         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
1939                      BITS_PER_TYPE(atomic_t));
1940
1941         if (old) {
1942                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
1943                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
1944         }
1945
1946         if (new) {
1947                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
1948                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
1949         }
1950 }
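
/*
 * Minimal usage sketch (hypothetical caller, not taken from the driver):
 * when a plane flips from one backing object to another, its frontbuffer
 * bits are handed over in a single call. plane_frontbuffer_bits stands in
 * for whatever mask the owning plane holds; either object pointer may be
 * NULL.
 *
 *      i915_gem_track_fb(old_fb_obj, new_fb_obj, plane_frontbuffer_bits);
 */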
1951
1952 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1953 #include "selftests/mock_gem_device.c"
1954 #include "selftests/i915_gem.c"
1955 #endif