// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);
static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
{
	return rb_entry(node, struct iova, node);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
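
/*
 * Illustrative sketch (not part of the original file, kept out of the build):
 * a minimal setup sequence a caller might use. The function name and the
 * 4KiB granule are assumptions; real users pick the IOMMU's minimum page
 * size and their own start pfn.
 */
#if 0
static int example_iova_domain_setup(struct iova_domain *iovad)
{
	int ret;

	/* The global iova kmem_cache must exist before any iova allocation. */
	ret = iova_cache_get();
	if (ret)
		return ret;

	/* 4KiB granule; keep pfn 0 out of the allocatable range. */
	init_iova_domain(iovad, 1UL << 12, 1);
	return 0;
}
#endif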

static bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return !!iovad->fq;
}

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!has_iova_flush_queue(iovad))
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq = NULL;
	iovad->flush_cb = NULL;
	iovad->entry_dtor = NULL;
}

int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	struct iova_fq __percpu *queue;
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt,  0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue)
		return -ENOMEM;

	iovad->flush_cb = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(queue, cpu);
		fq->head = 0;
		fq->tail = 0;
		spin_lock_init(&fq->lock);
	}

	/* Make the per-CPU queues fully initialised before they are visible. */
	smp_wmb();

	iovad->fq = queue;

	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = to_iova(iovad->cached32_node);
	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
	     free->pfn_lo >= cached_iova->pfn_lo)) {
		iovad->cached32_node = rb_next(&free->node);
		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
	}

	cached_iova = to_iova(iovad->cached_node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = to_iova(*new);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1);	/* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn, retry_pfn;
	unsigned long align_mask = ~0UL;
	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (limit_pfn <= iovad->dma_32bit_pfn &&
			size >= iovad->max32_alloc_size)
		goto iova32_full;

	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = to_iova(curr);
	retry_pfn = curr_iova->pfn_hi + 1;

retry:
	do {
		high_pfn = min(high_pfn, curr_iova->pfn_lo);
		new_pfn = (high_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = to_iova(curr);
	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);

	if (high_pfn < size || new_pfn < low_pfn) {
		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
			high_pfn = limit_pfn;
			low_pfn = retry_pfn;
			curr = &iovad->anchor.node;
			curr_iova = to_iova(curr);
			goto retry;
		}
		iovad->max32_alloc_size = size;
		goto iova32_full;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;

iova32_full:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return -ENOMEM;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

static struct iova *alloc_iova_mem(void)
{
	return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN);
}

static void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = to_iova(node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	if (iova)
		private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(free_iova);
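
/*
 * Illustrative sketch (not part of the original file, kept out of the build):
 * allocating a naturally aligned range below the 32-bit boundary and
 * releasing it again. The wrapper name and the 256-page request are
 * assumptions made for the example.
 */
#if 0
static unsigned long example_alloc_and_free(struct iova_domain *iovad)
{
	struct iova *iova;
	unsigned long pfn;

	/* 256 granule-sized pages, size-aligned, below the 32-bit pfn limit. */
	iova = alloc_iova(iovad, 256, iovad->dma_32bit_pfn - 1, true);
	if (!iova)
		return 0;

	pfn = iova->pfn_lo;

	/* ... program the IOMMU and perform DMA on this range ... */

	/* Free by pointer; free_iova(iovad, pfn) would look it up first. */
	__free_iova(iovad, iova);
	return pfn;
}
#endif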

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
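
/*
 * Illustrative sketch (not part of the original file, kept out of the build):
 * the fast-path pairing typically used on hot DMA map/unmap paths. The
 * wrapper names are assumptions; callers round 'npages' as they see fit.
 */
#if 0
static unsigned long example_map_fast(struct iova_domain *iovad,
				      unsigned long npages)
{
	/* Served from the per-CPU rcache when possible; flush caches on failure. */
	return alloc_iova_fast(iovad, npages, iovad->dma_32bit_pfn - 1, true);
}

static void example_unmap_fast(struct iova_domain *iovad,
			       unsigned long pfn, unsigned long npages)
{
	/* Returns the range to the rcache, or to the rbtree if the cache is full. */
	free_iova_fast(iovad, pfn, npages);
}
#endif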

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].data = data;
	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&iovad->fq_timer_on) &&
	    !atomic_xchg(&iovad->fq_timer_on, 1))
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
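
/*
 * Illustrative sketch (not part of the original file, kept out of the build):
 * deferred unmap through the flush queue. A flush queue must have been set
 * up with init_iova_flush_queue() first; 'cookie' is the opaque value later
 * handed back to the entry_dtor. The wrapper names are assumptions.
 */
#if 0
static int example_enable_deferred_flush(struct iova_domain *iovad,
					 iova_flush_cb flush_cb,
					 iova_entry_dtor entry_dtor)
{
	return init_iova_flush_queue(iovad, flush_cb, entry_dtor);
}

static void example_unmap_deferred(struct iova_domain *iovad,
				   unsigned long pfn, unsigned long npages,
				   unsigned long cookie)
{
	/* The range is only recycled after the next domain-wide IOTLB flush. */
	queue_iova(iovad, pfn, npages, cookie);
}
#endif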

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
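
/*
 * Illustrative sketch (not part of the original file, kept out of the build):
 * teardown mirrors setup. The function name is an assumption.
 */
#if 0
static void example_iova_domain_teardown(struct iova_domain *iovad)
{
	/* Frees the flush queue, the rcaches and every remaining iova. */
	put_iova_domain(iovad);

	/* Balance the earlier iova_cache_get(). */
	iova_cache_put();
}
#endif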

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = to_iova(node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = to_iova(node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserver node
	 * or need to insert remaining non overlap addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
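
/*
 * Illustrative sketch (not part of the original file, kept out of the build):
 * carving out a window that must never be handed to a device, e.g. an MSI
 * doorbell range. The address values and the function name are assumptions.
 */
#if 0
static bool example_reserve_msi_window(struct iova_domain *iovad)
{
	unsigned long lo = iova_pfn(iovad, 0xfee00000);
	unsigned long hi = iova_pfn(iovad, 0xfeefffff);

	/* Subsequent alloc_iova() calls will never return pfns in [lo, hi]. */
	return reserve_iova(iovad, lo, hi) != NULL;
}
#endif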

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		if (WARN_ON(!iova))
			continue;

		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	BUG_ON(iova_magazine_empty(mag));

	/* Only fall back to the rbtree if we have no suitable pfns at all */
	for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
		if (i == 0)
			return 0;

	/* Swap it to pop it */
	pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];

	return pfn;
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges of global cache
 */
static void free_global_cached_iovas(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_irqsave(&rcache->lock, flags);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		rcache->depot_size = 0;
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");