/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 */
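
/*
 * Typical usage, as a hedged sketch (error handling trimmed, and the
 * obj/fence names are illustrative placeholders, not part of this file):
 * take the ww_mutex, make room for a shared slot, then publish the fence.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(&obj->resv, &ctx);
 *	if (ret == -EDEADLK)
 *		dma_resv_lock_slow(&obj->resv, &ctx);
 *	ww_acquire_done(&ctx);
 *
 *	ret = dma_resv_reserve_shared(&obj->resv, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(&obj->resv, fence);
 *
 *	dma_resv_unlock(&obj->resv);
 *	ww_acquire_fini(&ctx);
 */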

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	/*
	 * kmalloc() may have rounded the allocation up; derive shared_max
	 * from ksize() so all of the allocated memory can hold fence slots.
	 */
	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
/*
 * Prime lockdep at boot with the expected ordering between the
 * reservation lock, memory reclaim and the mmap/mmu notifier locks,
 * so that violations anywhere in the kernel are reported immediately.
 */
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
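
/*
 * Lifecycle sketch (hypothetical embedding, not from this file): a
 * dma_resv is normally embedded in a buffer object, initialized once
 * before use and destroyed after the last reference to the buffer is
 * gone.
 *
 *	struct my_buffer {
 *		struct dma_resv resv;
 *	};
 *
 *	static struct my_buffer *my_buffer_create(void)
 *	{
 *		struct my_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 *
 *		if (buf)
 *			dma_resv_init(&buf->resv);
 *		return buf;
 *	}
 *
 *	static void my_buffer_destroy(struct my_buffer *buf)
 *	{
 *		dma_resv_fini(&buf->resv);
 *		kfree(buf);
 *	}
 */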

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = 4;
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
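
/*
 * Sketch of the reserve-then-add pattern under the held lock (resv and
 * fence are illustrative placeholders): the reservation may fail with
 * -ENOMEM, but once it has succeeded the add itself cannot fail.
 *
 *	ret = dma_resv_lock(resv, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_resv_reserve_shared(resv, 1);
 *	if (ret) {
 *		dma_resv_unlock(resv);
 *		return ret;
 *	}
 *
 *	dma_resv_add_shared_fence(resv, fence);
 *	dma_resv_unlock(resv);
 */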

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, obj->lock must be held, and
 * dma_resv_reserve_shared() has been called.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {
		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot.  The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
						dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
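
/*
 * Sketch of publishing a write (names are placeholders): the exclusive
 * fence replaces both the old exclusive fence and all shared fences, so
 * readers that snapshot the object afterwards only ever see the writer.
 *
 *	dma_resv_lock(resv, NULL);
 *	dma_resv_add_excl_fence(resv, job->done_fence);
 *	dma_resv_unlock(resv);
 */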

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
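
/*
 * Hypothetical use when cloning a buffer object (clone/orig are not from
 * this file): the clone inherits all of the original's fences, so it
 * cannot be freed or reused while pending work still references it. Only
 * the destination needs to be locked; the source is read under RCU.
 *
 *	dma_resv_lock(&clone->resv, NULL);
 *	ret = dma_resv_copy_fences(&clone->resv, &orig->resv);
 *	dma_resv_unlock(&clone->resv);
 */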

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
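
/*
 * Caller-side sketch (illustrative only): the returned array and every
 * fence in it are owned by the caller, so both must be released.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
 *	if (ret)
 *		return ret;
 *
 *	... inspect excl and shared[0..count-1] ...
 *
 *	for (i = 0; i < count; ++i)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	dma_fence_put(excl);
 */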

/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
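
/*
 * Wait sketch (illustrative only): wait up to ~100ms for all readers and
 * the writer to finish. A negative return means the wait was interrupted,
 * zero means it timed out, and a positive value means all fences signaled.
 *
 *	long lret = dma_resv_wait_timeout_rcu(resv, true, true,
 *					      msecs_to_jiffies(100));
 *	if (lret < 0)
 *		return lret;
 *	if (lret == 0)
 *		return -ETIMEDOUT;
 */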

static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
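
/*
 * Poll sketch (illustrative only): a non-blocking busy check, such as a
 * GEM_BUSY style ioctl, might use this to report whether the buffer is
 * still in use by readers or the writer.
 *
 *	bool idle = dma_resv_test_signaled_rcu(resv, true);
 *
 *	args->busy = !idle;
 */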