drivers/gpu/drm/ttm/ttm_tt.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

/**
 * ttm_tt_create - allocate the struct ttm_tt backing a buffer object
 * @bo: The buffer object that needs a ttm.
 * @zero_alloc: True if the allocated pages must be cleared.
 *
 * Called with the BO's reservation object held.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	uint32_t page_flags = 0;

	dma_resv_assert_held(bo->base.resv);

	if (bo->ttm)
		return 0;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	if (bdev->no_retry)
		page_flags |= TTM_PAGE_FLAG_NO_RETRY;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_PAGE_FLAG_SG;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}

/*
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

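/*
 * Allocates a single array that holds both the page pointers and the DMA
 * addresses; dma_address is pointed at the tail of that allocation.
 */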
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *)(ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
					  sizeof(*ttm->dma_address),
					  GFP_KERNEL | __GFP_ZERO);
	if (!ttm->dma_address)
		return -ENOMEM;
	return 0;
}

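/*
 * Move a single page of the kernel linear mapping to the requested caching
 * state. Highmem pages have no linear mapping and are left untouched.
 */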
static int ttm_tt_set_page_caching(struct page *p,
				   enum ttm_caching_state c_old,
				   enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype.
		 */
		ret = ttm_set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = ttm_set_pages_wc(p, 1);
	else if (c_new == tt_uncached)
		ret = ttm_set_pages_uc(p, 1);

	return ret;
}

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */
static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (!ttm_tt_is_populated(ttm)) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back any pages already switched to the new caching state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

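/**
 * ttm_tt_set_placement_caching - apply the caching state implied by a placement
 * @ttm: The struct ttm_tt whose page caching is to be changed.
 * @placement: TTM_PL_FLAG_* flags selecting write-combined, uncached or cached.
 *
 * Returns 0 on success, negative error code on failure.
 */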
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

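/**
 * ttm_tt_destroy - unbind, unpopulate and free a struct ttm_tt
 * @bdev: The buffer object device owning the ttm.
 * @ttm: The struct ttm_tt to destroy.
 *
 * Any non-persistent swap storage is released before the driver's
 * ttm_tt_destroy() callback frees the structure itself.
 */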
void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	ttm_tt_unbind(bdev, ttm);

	ttm_tt_unpopulate(bdev, ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	bdev->driver->ttm_tt_destroy(bdev, ttm);
}

static void ttm_tt_init_fields(struct ttm_tt *ttm,
			       struct ttm_buffer_object *bo,
			       uint32_t page_flags)
{
	ttm->num_pages = bo->num_pages;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm_tt_set_unpopulated(ttm);
	ttm->swap_storage = NULL;
	ttm->sg = bo->sg;
}

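/**
 * ttm_tt_init - initialize a struct ttm_tt for a buffer object
 * @ttm: The struct ttm_tt to initialize.
 * @bo: The buffer object the ttm will back.
 * @page_flags: TTM_PAGE_FLAG_* flags for this ttm.
 *
 * Returns 0 on success, -ENOMEM if the page directory could not be allocated.
 */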
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
		uint32_t page_flags)
{
	ttm_tt_init_fields(ttm, bo, page_flags);

	if (ttm_tt_alloc_page_directory(ttm)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

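/*
 * Typical usage (a minimal sketch, not taken from any specific driver; the
 * my_tt / my_ttm_tt_create names are hypothetical): a driver's
 * ttm_tt_create() callback embeds struct ttm_tt in its own tt object and
 * initializes it with ttm_tt_init():
 *
 *	struct my_tt {
 *		struct ttm_tt ttm;
 *	};
 *
 *	static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
 *					       uint32_t page_flags)
 *	{
 *		struct my_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);
 *
 *		if (!tt)
 *			return NULL;
 *		if (ttm_tt_init(&tt->ttm, bo, page_flags)) {
 *			kfree(tt);
 *			return NULL;
 *		}
 *		return &tt->ttm;
 *	}
 */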
void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		    uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
		   uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	int ret;

	ttm_tt_init_fields(ttm, bo, page_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (page_flags & TTM_PAGE_FLAG_SG)
		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
	else
		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
	if (ret) {
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

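/*
 * Note that for the DMA variant pages and dma_address share one allocation,
 * while the SG variant only allocates dma_address; free whichever pointer
 * owns the memory.
 */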
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	if (ttm->pages)
		kvfree(ttm->pages);
	else
		kvfree(ttm_dma->dma_address);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

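/*
 * Unbind the ttm from the device via the driver's ttm_tt_unbind() callback,
 * if it is currently bound.
 */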
void ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	if (ttm_tt_is_bound(ttm)) {
		bdev->driver->ttm_tt_unbind(bdev, ttm);
		ttm_tt_set_unbound(ttm);
	}
}

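/**
 * ttm_tt_bind - populate a ttm and bind it to the given resource
 * @bdev: The buffer object device.
 * @ttm: The struct ttm_tt to bind.
 * @bo_mem: The resource describing where the ttm is to be bound.
 * @ctx: Operation context for the population step.
 *
 * Returns 0 on success, negative error code on failure.
 */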
int ttm_tt_bind(struct ttm_bo_device *bdev,
		struct ttm_tt *ttm, struct ttm_resource *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm_tt_is_bound(ttm))
		return 0;

	ret = ttm_tt_populate(bdev, ttm, ctx);
	if (ret)
		return ret;

	ret = bdev->driver->ttm_tt_bind(bdev, ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm_tt_set_bound(ttm);

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

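/*
 * Copy the contents of a swapped-out ttm back from its shmem swap storage
 * into the (already allocated) backing pages, and drop the swap file unless
 * it is persistent.
 */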
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

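/*
 * Copy the ttm's backing pages out to shmem swap storage (newly created
 * unless @persistent_swap_storage is given), then unpopulate the ttm and
 * mark it as swapped.
 */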
int ttm_tt_swapout(struct ttm_bo_device *bdev,
		   struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(bdev, ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

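/*
 * Associate the backing pages with the device's address_space. SG ttms are
 * skipped since their pages are owned elsewhere.
 */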
static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;
}

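/**
 * ttm_tt_populate - allocate the backing pages of a ttm
 * @bdev: The buffer object device.
 * @ttm: The struct ttm_tt to populate.
 * @ctx: Operation context controlling how the allocation may block.
 *
 * Uses the driver's ttm_tt_populate() callback if one is provided, otherwise
 * falls back to the generic page pool. Returns 0 on success, negative error
 * code on failure.
 */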
int ttm_tt_populate(struct ttm_bo_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (bdev->driver->ttm_tt_populate)
		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(bdev, ttm);
	return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

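/*
 * Release the backing pages of a populated ttm, clearing the page->mapping
 * pointers first. Counterpart of ttm_tt_populate().
 */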
void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
		       struct ttm_tt *ttm)
{
	if (!ttm_tt_is_populated(ttm))
		return;

	ttm_tt_clear_mapping(ttm);
	if (bdev->driver->ttm_tt_unpopulate)
		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
	else
		ttm_pool_unpopulate(ttm);
}