// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

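/*
 * Overview comment (added for clarity; a summary of the tables below, not
 * text from the original file): each struct ttm_place names one memory
 * type a buffer object may occupy (VRAM, system pages, guest memory
 * regions (GMR) or memory objects (MOB)), and the struct ttm_placement
 * tables combine them into preferred and fallback ("busy") lists that TTM
 * walks in order when placing or evicting a buffer.
 */
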
static const struct ttm_place vram_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place vram_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place sys_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
};

static const struct ttm_place sys_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place gmr_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static const struct ttm_place gmr_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

static const struct ttm_place mob_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

static const struct ttm_place mob_ne_placement_flags = {
        .fpfn = 0,
        .lpfn = 0,
        .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static const struct ttm_place vram_gmr_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place gmr_vram_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_vram_gmr_placement = {
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

static const struct ttm_place vram_gmr_ne_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
                         TTM_PL_FLAG_NO_EVICT
        }
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
        .num_placement = 2,
        .placement = vram_gmr_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
        .num_placement = 1,
        .placement = &sys_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_ne_placement_flags
};

static const struct ttm_place evictable_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

static const struct ttm_place nonfixed_placement_flags[] = {
        {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
        }, {
                .fpfn = 0,
                .lpfn = 0,
                .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
        }
};

struct ttm_placement vmw_evictable_placement = {
        .num_placement = 4,
        .placement = evictable_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
        .num_placement = 1,
        .num_busy_placement = 2,
        .placement = &gmr_placement_flags,
        .busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_placement_flags,
        .busy_placement = &mob_placement_flags
};

struct ttm_placement vmw_mob_ne_placement = {
        .num_placement = 1,
        .num_busy_placement = 1,
        .placement = &mob_ne_placement_flags,
        .busy_placement = &mob_ne_placement_flags
};

struct ttm_placement vmw_nonfixed_placement = {
        .num_placement = 3,
        .placement = nonfixed_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

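/*
 * Field summary (comment added for clarity, inferred from how the struct
 * is used below; not original kernel-doc): vmw_ttm_tt is vmwgfx's TTM
 * backend state. It embeds a struct ttm_dma_tt, records the owning
 * device, the GMR id and memory type it is currently bound to, an
 * optional MOB, the scatter-gather table plus its vmw_sg_table view, the
 * memory-accounting size of that table, and whether a DMA mapping is
 * currently set up.
 */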
struct vmw_ttm_tt {
        struct ttm_dma_tt dma_ttm;
        struct vmw_private *dev_priv;
        int gmr_id;
        struct vmw_mob *mob;
        int mem_type;
        struct sg_table sgt;
        struct vmw_sg_table vsgt;
        uint64_t sg_alloc_size;
        bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
        return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
        bool ret = __vmw_piter_non_sg_next(viter);

        return __sg_page_iter_dma_next(&viter->iter) && ret;
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
        return viter->pages[viter->i];
}

/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
        return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
        return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
        return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Offset in pages at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
                     unsigned long p_offset)
{
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
        viter->page = &__vmw_piter_non_sg_page;
        viter->pages = vsgt->pages;
        switch (vsgt->mode) {
        case vmw_dma_phys:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_phys_addr;
                break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
                viter->addrs = vsgt->addrs;
                break;
        case vmw_dma_map_populate:
        case vmw_dma_map_bind:
                viter->next = &__vmw_piter_sg_next;
                viter->dma_address = &__vmw_piter_sg_addr;
                __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
                                     vsgt->sgt->orig_nents, p_offset);
                break;
        default:
                BUG();
        }
}
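
/*
 * Illustrative usage sketch (comment added; it mirrors the loop in
 * vmw_ttm_map_dma below rather than quoting original documentation):
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *		...
 *	}
 *
 * Because of the __sg_page_iter_start convention noted above, the first
 * vmw_piter_next() call positions the iterator on the first page.
 */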

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;

        dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
                DMA_BIDIRECTIONAL);
        vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct device *dev = vmw_tt->dev_priv->dev->dev;
        int ret;

        ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
                         DMA_BIDIRECTIONAL);
        if (unlikely(ret == 0))
                return -ENOMEM;

        vmw_tt->sgt.nents = ret;

        return 0;
}
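
/*
 * Sketch of the constraint described above (added illustration; this
 * bracketing never appears in vmwgfx precisely because the driver only
 * uses this path when both sync calls are known to be trivial). A CPU
 * write to mapped pages would otherwise have to be fenced like:
 *
 *	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents,
 *			    DMA_BIDIRECTIONAL);
 *	// ...CPU writes to the pages...
 *	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents,
 *			       DMA_BIDIRECTIONAL);
 */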

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        struct vmw_piter iter;
        dma_addr_t old;
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;

        if (vmw_tt->mapped)
                return 0;

        vsgt->mode = dev_priv->map_mode;
        vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
        vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
        vsgt->addrs = vmw_tt->dma_ttm.dma_address;
        vsgt->sgt = &vmw_tt->sgt;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                if (unlikely(!sgl_size)) {
                        sgl_size = ttm_round_pot(sizeof(struct scatterlist));
                        sgt_size = ttm_round_pot(sizeof(struct sg_table));
                }
                vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
                ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = __sg_alloc_table_from_pages
                        (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
                         (unsigned long) vsgt->num_pages << PAGE_SHIFT,
                         dma_get_max_seg_size(dev_priv->dev->dev),
                         GFP_KERNEL);
                if (unlikely(ret != 0))
                        goto out_sg_alloc_fail;

                if (vsgt->num_pages > vmw_tt->sgt.nents) {
                        uint64_t over_alloc =
                                sgl_size * (vsgt->num_pages -
                                            vmw_tt->sgt.nents);

                        ttm_mem_global_free(glob, over_alloc);
                        vmw_tt->sg_alloc_size -= over_alloc;
                }

                ret = vmw_ttm_map_for_dma(vmw_tt);
                if (unlikely(ret != 0))
                        goto out_map_fail;

                break;
        default:
                break;
        }

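        /*
         * Count the contiguous DMA address regions: a new region starts
         * whenever the current address does not immediately follow the
         * previous one. (Comment added for clarity; not in the original.)
         */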
        old = ~((dma_addr_t) 0);
        vmw_tt->vsgt.num_regions = 0;
        for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
                dma_addr_t cur = vmw_piter_dma_addr(&iter);

                if (cur != old + PAGE_SIZE)
                        vmw_tt->vsgt.num_regions++;
                old = cur;
        }

        vmw_tt->mapped = true;
        return 0;

out_map_fail:
        sg_free_table(vmw_tt->vsgt.sgt);
        vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
        ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
        return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
        struct vmw_private *dev_priv = vmw_tt->dev_priv;

        if (!vmw_tt->vsgt.sgt)
                return;

        switch (dev_priv->map_mode) {
        case vmw_dma_map_bind:
        case vmw_dma_map_populate:
                vmw_ttm_unmap_from_dma(vmw_tt);
                sg_free_table(vmw_tt->vsgt.sgt);
                vmw_tt->vsgt.sgt = NULL;
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_tt->sg_alloc_size);
                break;
        default:
                break;
        }
        vmw_tt->mapped = false;
}

/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        return &vmw_tt->vsgt;
}

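/**
 * vmw_ttm_bind - Bind a TTM backend to device memory
 *
 * @ttm: Pointer to the struct ttm_tt embedded in a struct vmw_ttm_tt.
 * @bo_mem: The memory region to bind to.
 *
 * Kernel-doc added here for clarity (summarized from the function body,
 * not original text): ensures the pages are DMA-mapped, then binds them
 * to the GMR or MOB identified by @bo_mem, creating the MOB on first use.
 */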
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        int ret;

        ret = vmw_ttm_map_dma(vmw_be);
        if (unlikely(ret != 0))
                return ret;

        vmw_be->gmr_id = bo_mem->start;
        vmw_be->mem_type = bo_mem->mem_type;

        switch (bo_mem->mem_type) {
        case VMW_PL_GMR:
                return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
                                    ttm->num_pages, vmw_be->gmr_id);
        case VMW_PL_MOB:
                if (unlikely(vmw_be->mob == NULL)) {
                        vmw_be->mob =
                                vmw_mob_create(ttm->num_pages);
                        if (unlikely(vmw_be->mob == NULL))
                                return -ENOMEM;
                }

                return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
                                    &vmw_be->vsgt, ttm->num_pages,
                                    vmw_be->gmr_id);
        default:
                BUG();
        }
        return 0;
}

static void vmw_ttm_unbind(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        switch (vmw_be->mem_type) {
        case VMW_PL_GMR:
                vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
                break;
        case VMW_PL_MOB:
                vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
                break;
        default:
                BUG();
        }

        if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
                vmw_ttm_unmap_dma(vmw_be);
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_be =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

        vmw_ttm_unmap_dma(vmw_be);
        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ttm_dma_tt_fini(&vmw_be->dma_ttm);
        else
                ttm_tt_fini(ttm);

        if (vmw_be->mob)
                vmw_mob_destroy(vmw_be->mob);

        kfree(vmw_be);
}

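/**
 * vmw_ttm_populate - Allocate backing pages for a TTM
 *
 * @ttm: Pointer to the struct ttm_tt to populate.
 * @ctx: Pointer to the TTM operation context.
 *
 * Kernel-doc added here for clarity (summarized from the function body,
 * not original text): in coherent mode, accounts for and allocates the
 * DMA-address array alongside the pages; otherwise populates from the
 * regular TTM page pool.
 */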
static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        struct vmw_ttm_tt *vmw_tt =
                container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
                ret = ttm_mem_global_alloc(glob, size, ctx);
                if (unlikely(ret != 0))
                        return ret;

                ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
                                        ctx);
                if (unlikely(ret != 0))
                        ttm_mem_global_free(glob, size);
        } else
                ret = ttm_pool_populate(ttm, ctx);

        return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
        struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
                                                 dma_ttm.ttm);
        struct vmw_private *dev_priv = vmw_tt->dev_priv;
        struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

        if (vmw_tt->mob) {
                vmw_mob_destroy(vmw_tt->mob);
                vmw_tt->mob = NULL;
        }

        vmw_ttm_unmap_dma(vmw_tt);
        if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
                size_t size =
                        ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

                ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
                ttm_mem_global_free(glob, size);
        } else
                ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
                                        uint32_t page_flags)
{
        struct vmw_ttm_tt *vmw_be;
        int ret;

        vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
        vmw_be->mob = NULL;

        if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
                ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
        else
                ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
        if (unlikely(ret != 0))
                goto out_no_init;

        return &vmw_be->dma_ttm.ttm;
out_no_init:
        kfree(vmw_be);
        return NULL;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct ttm_object_file *tfile =
                vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

        return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
{
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
        case VMW_PL_MOB:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @evict: Whether the move is an eviction (unused here).
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_resource *mem)
{
        vmw_bo_move_notify(bo, mem);
        vmw_query_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
        vmw_bo_swap_notify(bo);
        (void) ttm_bo_wait(bo, false, false);
}


struct ttm_bo_driver vmw_bo_driver = {
        .ttm_tt_create = &vmw_ttm_tt_create,
        .ttm_tt_populate = &vmw_ttm_populate,
        .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .move_notify = vmw_move_notify,
        .swap_notify = vmw_swap_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
};

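/**
 * vmw_bo_create_and_populate - Create and DMA-map a kernel buffer object
 *
 * @dev_priv: Pointer to the device private structure.
 * @bo_size: Size of the buffer object in bytes.
 * @bo_p: Receives a pointer to the created buffer object on success.
 *
 * Kernel-doc added here for clarity (summarized from the function body,
 * not original text): creates a non-evictable system-memory buffer
 * object, populates its pages and sets up their DMA mappings.
 */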
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
                               unsigned long bo_size,
                               struct ttm_buffer_object **bo_p)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_buffer_object *bo;
        int ret;

        ret = ttm_bo_create(&dev_priv->bdev, bo_size,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, &bo);

        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, false, true, NULL);
        BUG_ON(ret != 0);
        ret = vmw_ttm_populate(bo->ttm, &ctx);
        if (likely(ret == 0)) {
                struct vmw_ttm_tt *vmw_tt =
                        container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
                ret = vmw_ttm_map_dma(vmw_tt);
        }

        ttm_bo_unreserve(bo);

        if (likely(ret == 0))
                *bo_p = bo;
        return ret;
}