Lines matching refs: iovad

22 static bool iova_rcache_insert(struct iova_domain *iovad,
25 static unsigned long iova_rcache_get(struct iova_domain *iovad,
28 static void free_iova_rcaches(struct iova_domain *iovad);
29 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
30 static void free_global_cached_iovas(struct iova_domain *iovad);
38 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
48 spin_lock_init(&iovad->iova_rbtree_lock);
49 iovad->rbroot = RB_ROOT;
50 iovad->cached_node = &iovad->anchor.node;
51 iovad->cached32_node = &iovad->anchor.node;
52 iovad->granule = granule;
53 iovad->start_pfn = start_pfn;
54 iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
55 iovad->max32_alloc_size = iovad->dma_32bit_pfn;
56 iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
57 rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
58 rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
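
The lines above show init_iova_domain() seeding the rbtree with an anchor node and deriving dma_32bit_pfn from the granule. A minimal caller-side sketch, assuming a page-sized granule and a start pfn of 1 so that IOVA 0 is never handed out; the embedding function and error handling are illustrative, not taken from iova.c:

	#include <linux/iova.h>

	static int example_domain_init(struct iova_domain *iovad)
	{
		/* granule is the smallest allocation unit; start_pfn is the
		 * lowest pfn the allocator may hand out. */
		init_iova_domain(iovad, PAGE_SIZE, 1);

		/* Optional: set up the per-CPU caches used by the fast path
		 * (see iova_domain_init_rcaches() further down the listing). */
		return iova_domain_init_rcaches(iovad);
	}
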
63 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
65 if (limit_pfn <= iovad->dma_32bit_pfn)
66 return iovad->cached32_node;
68 return iovad->cached_node;
72 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
74 if (new->pfn_hi < iovad->dma_32bit_pfn)
75 iovad->cached32_node = &new->node;
77 iovad->cached_node = &new->node;
81 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
85 cached_iova = to_iova(iovad->cached32_node);
87 (free->pfn_hi < iovad->dma_32bit_pfn &&
89 iovad->cached32_node = rb_next(&free->node);
91 if (free->pfn_lo < iovad->dma_32bit_pfn)
92 iovad->max32_alloc_size = iovad->dma_32bit_pfn;
94 cached_iova = to_iova(iovad->cached_node);
96 iovad->cached_node = rb_next(&free->node);
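
These cached-node helpers keep a pointer to the most recent allocation on each side of the 32-bit boundary so a later top-down search can start there instead of at the rbtree root. to_iova() itself is not among the matched lines; it is presumably the usual container_of-style wrapper, sketched here as an assumption:

	/* Assumed definition, matching how the matched lines use it. */
	#define to_iova(n)	rb_entry(n, struct iova, node)
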
99 static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
110 if (limit_pfn > iovad->dma_32bit_pfn)
111 return &iovad->anchor.node;
113 node = iovad->rbroot.rb_node;
164 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
173 unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
179 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
180 if (limit_pfn <= iovad->dma_32bit_pfn &&
181 size >= iovad->max32_alloc_size)
184 curr = __get_cached_rbnode(iovad, limit_pfn);
198 if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
201 curr = iova_find_limit(iovad, limit_pfn);
205 iovad->max32_alloc_size = size;
214 iova_insert_rbtree(&iovad->rbroot, new, prev);
215 __cached_rbnode_insert_update(iovad, new);
217 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
221 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
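
Lines 180-181 and 205 implement a small optimisation: once a sub-32-bit allocation of a given size has failed, max32_alloc_size remembers that size so equal or larger requests fail without walking the tree, and line 92 resets the memo whenever something below dma_32bit_pfn is freed. A hedged restatement of that check (hypothetical helper; field names follow the listing):

	static bool example_alloc32_will_fail(struct iova_domain *iovad,
					      unsigned long size,
					      unsigned long limit_pfn)
	{
		return limit_pfn <= iovad->dma_32bit_pfn &&
		       size >= iovad->max32_alloc_size;
	}
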
242 * @iovad: - iova domain in question
246 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
247 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
252 alloc_iova(struct iova_domain *iovad, unsigned long size,
263 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
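
A caller-side sketch of alloc_iova(): sizes and limits are given in pfns, so byte lengths go through iova_align()/iova_shift() first, and the returned range converts to a bus address with iova_dma_addr(). The wrapper function itself is illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/iova.h>

	static dma_addr_t example_alloc_below_4g(struct iova_domain *iovad,
						 size_t len)
	{
		unsigned long nr_pfns = iova_align(iovad, len) >> iova_shift(iovad);
		unsigned long limit_pfn = DMA_BIT_MASK(32) >> iova_shift(iovad);
		struct iova *iova;

		iova = alloc_iova(iovad, nr_pfns, limit_pfn, true /* size_aligned */);
		if (!iova)
			return 0;

		return iova_dma_addr(iovad, iova);
	}
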
276 private_find_iova(struct iova_domain *iovad, unsigned long pfn)
278 struct rb_node *node = iovad->rbroot.rb_node;
280 assert_spin_locked(&iovad->iova_rbtree_lock);
296 static void remove_iova(struct iova_domain *iovad, struct iova *iova)
298 assert_spin_locked(&iovad->iova_rbtree_lock);
299 __cached_rbnode_delete_update(iovad, iova);
300 rb_erase(&iova->node, &iovad->rbroot);
305 * @iovad: - iova domain in question.
310 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
316 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
317 iova = private_find_iova(iovad, pfn);
318 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
325 * @iovad: iova domain in question.
330 __free_iova(struct iova_domain *iovad, struct iova *iova)
334 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
335 remove_iova(iovad, iova);
336 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
343 * @iovad: - iova domain in question.
349 free_iova(struct iova_domain *iovad, unsigned long pfn)
354 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
355 iova = private_find_iova(iovad, pfn);
357 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
360 remove_iova(iovad, iova);
361 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
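
free_iova() is the locked lookup-and-free combination of find_iova() and __free_iova() above. A matching release sketch for the earlier allocation example (helper name and the dma parameter are illustrative):

	static void example_free(struct iova_domain *iovad, dma_addr_t dma)
	{
		free_iova(iovad, iova_pfn(iovad, dma));
	}
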
368 * @iovad: - iova domain in question
377 alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
392 iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
397 new_iova = alloc_iova(iovad, size, limit_pfn, true);
407 free_cpu_cached_iovas(cpu, iovad);
408 free_global_cached_iovas(iovad);
418 * @iovad: - iova domain in question.
425 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
427 if (iova_rcache_insert(iovad, pfn, size))
430 free_iova(iovad, pfn);
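
alloc_iova_fast()/free_iova_fast() form the cached fast path used by dma-iommu: line 392 tries the rcache first, line 397 falls back to the rbtree, and lines 407-408 flush the per-CPU and global caches for one retry when flush_rcache is true. On free, the same pfn count must be passed back so the range lands in the right cache bucket. A hedged round-trip sketch:

	static void example_fast_roundtrip(struct iova_domain *iovad,
					   unsigned long nr_pfns)
	{
		unsigned long limit_pfn = DMA_BIT_MASK(32) >> iova_shift(iovad);
		unsigned long pfn;

		pfn = alloc_iova_fast(iovad, nr_pfns, limit_pfn,
				      true /* flush caches and retry on failure */);
		if (!pfn)
			return;

		/* ... map and use the range ... */

		free_iova_fast(iovad, pfn, nr_pfns);
	}
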
434 static void iova_domain_free_rcaches(struct iova_domain *iovad)
437 &iovad->cpuhp_dead);
438 free_iova_rcaches(iovad);
443 * @iovad: - iova domain in question.
446 void put_iova_domain(struct iova_domain *iovad)
450 if (iovad->rcaches)
451 iova_domain_free_rcaches(iovad);
453 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
484 __insert_new_range(struct iova_domain *iovad,
491 iova_insert_rbtree(&iovad->rbroot, iova, NULL);
508 * @iovad: - iova domain pointer
515 reserve_iova(struct iova_domain *iovad,
524 if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
527 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
528 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
544 iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
547 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
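
reserve_iova() carves an inclusive pfn range out of the allocatable space, for example to keep the allocator away from an MSI or bridge window; the guard at line 524 rejects ranges that would overflow once shifted back to addresses. A usage sketch (addresses and helper name are illustrative):

	static int example_reserve_window(struct iova_domain *iovad,
					  dma_addr_t start, dma_addr_t end)
	{
		if (!reserve_iova(iovad, iova_pfn(iovad, start),
				  iova_pfn(iovad, end)))
			return -ENOMEM;
		return 0;
	}
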
590 struct iova_domain *iovad;
618 iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
623 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
626 struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
631 remove_iova(iovad, iova);
635 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
707 iova_magazine_free_pfns(mag, rcache->iovad);
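
The rcache layer batches freed pfns into fixed-size magazines; iova_magazine_free_pfns() (lines 618-635) returns a whole magazine's worth of entries to the rbtree under a single lock acquisition. An approximate shape of the caching structures, inferred from the matched lines rather than copied from iova.c (IOVA_MAG_SIZE and the field names are assumptions):

	#define IOVA_MAG_SIZE	127	/* assumed magazine capacity */

	struct iova_magazine {
		unsigned long size;			/* entries currently held */
		unsigned long pfns[IOVA_MAG_SIZE];	/* cached start pfns */
	};

	struct iova_cpu_rcache {
		spinlock_t lock;
		struct iova_magazine *loaded;		/* magazine in active use */
		struct iova_magazine *prev;		/* one spare magazine */
	};
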
713 int iova_domain_init_rcaches(struct iova_domain *iovad)
718 iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
721 if (!iovad->rcaches)
728 rcache = &iovad->rcaches[i];
730 rcache->iovad = iovad;
752 &iovad->cpuhp_dead);
758 free_iova_rcaches(iovad);
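
iova_domain_init_rcaches() allocates one bucket per cached size (line 718), gives each bucket per-CPU magazines, points it back at its domain (line 730), and adds the domain to the CPU-hotplug dead list (line 752); on failure it unwinds through free_iova_rcaches() (line 758). An approximate bucket layout, with the depot details hedged since they differ between kernel versions:

	struct iova_rcache {
		spinlock_t lock;			/* protects the depot */
		unsigned int depot_size;		/* spare magazines held globally */
		struct iova_magazine *depot;		/* assumed depot representation */
		struct iova_cpu_rcache __percpu *cpu_rcaches;
		struct iova_domain *iovad;		/* back-pointer, see line 730 */
	};
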
769 static bool __iova_rcache_insert(struct iova_domain *iovad,
807 static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
815 return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
862 static unsigned long iova_rcache_get(struct iova_domain *iovad,
871 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
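
Lines 815 and 871 index the rcache array by the log2 of the allocation size, so only sizes below IOVA_RANGE_CACHE_MAX_SIZE orders are cached and everything else goes straight to the rbtree. A hedged restatement of that bucket test (hypothetical helper; order_base_2() is the real helper from <linux/log2.h>):

	#include <linux/log2.h>

	static bool example_size_is_cacheable(unsigned long size)
	{
		return order_base_2(size) < IOVA_RANGE_CACHE_MAX_SIZE;
	}
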
877 static void free_iova_rcaches(struct iova_domain *iovad)
884 rcache = &iovad->rcaches[i];
898 kfree(iovad->rcaches);
899 iovad->rcaches = NULL;
905 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
913 rcache = &iovad->rcaches[i];
916 iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
917 iova_magazine_free_pfns(cpu_rcache->prev, iovad);
925 static void free_global_cached_iovas(struct iova_domain *iovad)
931 rcache = &iovad->rcaches[i];
936 iova_magazine_free_pfns(mag, iovad);
945 struct iova_domain *iovad;
947 iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
949 free_cpu_cached_iovas(cpu, iovad);
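
The final lines are the CPU-hotplug "dead" callback: when a CPU goes offline, its per-CPU magazines are drained back into the rbtree so the cached pfns are not stranded. The registration site is not part of this listing; a plausible wiring, assuming the usual multi-instance hotplug state (the state name string is an assumption):

	#include <linux/cpuhotplug.h>

	static int example_register_iova_hotplug(void)
	{
		/* One "dead" handler shared by all iova domains; each domain
		 * adds itself via cpuhp_state_add_instance_nocalls() as seen
		 * at line 752 above. */
		return cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD,
					       "iommu/iova:dead", NULL,
					       iova_cpuhp_dead);
	}
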