Lines matching "i-tlb-size" (kernel/dma/swiotlb.c)

1 // SPDX-License-Identifier: GPL-2.0-only
6 * I/O TLBs (aka DMA address translation hardware).
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
21 #define pr_fmt(fmt) "software IO TLB: " fmt
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
33 #include <linux/iommu-helper.h>
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
58 * Minimum IO TLB size to bother booting with. Systems with mainly
67 * struct io_tlb_slot - IO TLB slot descriptor
69 * @alloc_size: Size of the allocated buffer.
103 * struct io_tlb_area - IO TLB memory area descriptor
107 * @used: The number of used IO TLB slots in this area.
142 * swiotlb_adjust_nareas() - adjust the number of areas and slots
146 * The default size of the memory pool may also change to meet minimum area
147 * size requirements.
160 pr_info("SWIOTLB bounce buffer size rounded up to %luMB", in swiotlb_adjust_nareas()
165 * limit_nareas() - get the maximum number of areas for a given memory pool size
170 * a memory pool of the given size.
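A minimal sketch of such a clamp, assuming the only constraint is that every area must cover at least one full IO_TLB_SEGSIZE segment; this is a reconstruction, not the verbatim source:

	static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
	{
		/* Not enough slots to give each area a whole segment: shrink nareas. */
		if (nslots < nareas * IO_TLB_SEGSIZE)
			return nslots / IO_TLB_SEGSIZE;
		return nareas;
	}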
185 /* avoid tail segment of size < IO_TLB_SEGSIZE */ in setup_io_tlb_npages()
209 void __init swiotlb_adjust_size(unsigned long size) in swiotlb_adjust_size() argument
214 * adjust/expand SWIOTLB size for their use. in swiotlb_adjust_size()
219 size = ALIGN(size, IO_TLB_SIZE); in swiotlb_adjust_size()
220 default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); in swiotlb_adjust_size()
222 size = default_nslabs << IO_TLB_SHIFT; in swiotlb_adjust_size()
223 pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20); in swiotlb_adjust_size()
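For illustration only, an early-boot caller could use this hook as below; the function name and the 6%-of-RAM policy are assumptions modeled on how memory-encrypted guests commonly size their bounce buffers, not something taken from this listing:

	/* Hypothetical arch hook: enlarge the bounce buffer before swiotlb_init(). */
	static void __init example_adjust_bounce_buffer(unsigned long total_ram)
	{
		/* Assumed policy: 6% of RAM, capped at 1 GiB. */
		unsigned long size = min(total_ram * 6 / 100, 1UL << 30);

		swiotlb_adjust_size(size);
	}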
230 if (!mem->nslabs) { in swiotlb_print_info()
235 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
236 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
241 return val & (IO_TLB_SEGSIZE - 1); in io_tlb_offset()
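A worked example of this helper, assuming IO_TLB_SEGSIZE is 128 (a power of two, which the mask requires):

	/*
	 * io_tlb_offset(0)   == 0      first slot of a segment
	 * io_tlb_offset(127) == 127    last slot of the first segment
	 * io_tlb_offset(130) == 2      130 & (128 - 1), two slots into segment 1
	 */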
260 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
262 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
263 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT); in swiotlb_update_mem_attributes()
270 unsigned long bytes = nslabs << IO_TLB_SHIFT, i; in swiotlb_init_io_tlb_pool() local
272 mem->nslabs = nslabs; in swiotlb_init_io_tlb_pool()
273 mem->start = start; in swiotlb_init_io_tlb_pool()
274 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_pool()
275 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_pool()
276 mem->nareas = nareas; in swiotlb_init_io_tlb_pool()
277 mem->area_nslabs = nslabs / mem->nareas; in swiotlb_init_io_tlb_pool()
279 for (i = 0; i < mem->nareas; i++) { in swiotlb_init_io_tlb_pool()
280 spin_lock_init(&mem->areas[i].lock); in swiotlb_init_io_tlb_pool()
281 mem->areas[i].index = 0; in swiotlb_init_io_tlb_pool()
282 mem->areas[i].used = 0; in swiotlb_init_io_tlb_pool()
285 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_pool()
286 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i), in swiotlb_init_io_tlb_pool()
287 mem->nslabs - i); in swiotlb_init_io_tlb_pool()
288 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_pool()
289 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_pool()
293 mem->vaddr = vaddr; in swiotlb_init_io_tlb_pool()
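To illustrate the slots[i].list initialization above, assuming IO_TLB_SEGSIZE is 128: each entry counts the contiguous free slots from i up to the next segment boundary, capped near the end of the pool:

	/*
	 * i       :   0    1    2  ...  127  128  129 ...
	 * list[i] : 128  127  126  ...    1  128  127 ...
	 *
	 * Near the end of the pool the count is further capped by nslabs - i.
	 */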
298 * add_mem_pool() - add a memory pool to the allocator
299 * @mem: Software IO TLB allocator.
305 spin_lock(&mem->lock); in add_mem_pool()
306 list_add_rcu(&pool->node, &mem->pools); in add_mem_pool()
307 mem->nslabs += pool->nslabs; in add_mem_pool()
308 spin_unlock(&mem->lock); in add_mem_pool()
310 mem->nslabs = pool->nslabs; in add_mem_pool()
316 int (*remap)(void *tlb, unsigned long nslabs)) in swiotlb_memblock_alloc() argument
319 void *tlb; in swiotlb_memblock_alloc() local
327 tlb = memblock_alloc(bytes, PAGE_SIZE); in swiotlb_memblock_alloc()
329 tlb = memblock_alloc_low(bytes, PAGE_SIZE); in swiotlb_memblock_alloc()
331 if (!tlb) { in swiotlb_memblock_alloc()
332 pr_warn("%s: Failed to allocate %zu bytes tlb structure\n", in swiotlb_memblock_alloc()
337 if (remap && remap(tlb, nslabs) < 0) { in swiotlb_memblock_alloc()
338 memblock_free(tlb, PAGE_ALIGN(bytes)); in swiotlb_memblock_alloc()
343 return tlb; in swiotlb_memblock_alloc()
348 * structures for the software IO TLB used to implement the DMA API.
351 int (*remap)(void *tlb, unsigned long nslabs)) in swiotlb_init_remap() argument
357 void *tlb; in swiotlb_init_remap() local
371 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1); in swiotlb_init_remap()
381 while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) { in swiotlb_init_remap()
389 pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs", in swiotlb_init_remap()
394 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
395 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
396 if (!mem->slots) { in swiotlb_init_remap()
402 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), in swiotlb_init_remap()
404 if (!mem->areas) { in swiotlb_init_remap()
405 pr_warn("%s: Failed to allocate mem->areas.\n", __func__); in swiotlb_init_remap()
409 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas); in swiotlb_init_remap()
426 int swiotlb_init_late(size_t size, gfp_t gfp_mask, in swiotlb_init_late() argument
427 int (*remap)(void *tlb, unsigned long nslabs)) in swiotlb_init_late() argument
430 unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE); in swiotlb_init_late()
453 io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1); in swiotlb_init_late()
468 order--; in swiotlb_init_late()
474 return -ENOMEM; in swiotlb_init_late()
494 area_order = get_order(array_size(sizeof(*mem->areas), nareas)); in swiotlb_init_late()
495 mem->areas = (struct io_tlb_area *) in swiotlb_init_late()
497 if (!mem->areas) in swiotlb_init_late()
500 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
501 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
502 if (!mem->slots) in swiotlb_init_late()
515 free_pages((unsigned long)mem->areas, area_order); in swiotlb_init_late()
518 return -ENOMEM; in swiotlb_init_late()
531 if (!mem->nslabs) in swiotlb_exit()
535 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
536 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
537 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
540 if (mem->late_alloc) { in swiotlb_exit()
541 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_exit()
542 mem->nareas)); in swiotlb_exit()
543 free_pages((unsigned long)mem->areas, area_order); in swiotlb_exit()
545 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
547 memblock_free_late(__pa(mem->areas), in swiotlb_exit()
548 array_size(sizeof(*mem->areas), mem->nareas)); in swiotlb_exit()
549 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
550 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
559 * alloc_dma_pages() - allocate pages to be used for DMA
561 * @bytes: Size of the buffer.
567 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
582 if (paddr + bytes - 1 > phys_limit) { in alloc_dma_pages()
584 return ERR_PTR(-EAGAIN); in alloc_dma_pages()
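A minimal sketch of how a caller could distinguish the two failure modes documented above; the three-argument signature and the single DMA32 retry are assumptions based on the fragments, not the verbatim source:

	static struct page *alloc_dma_pages_below(gfp_t gfp, size_t bytes, u64 phys_limit)
	{
		struct page *page = alloc_dma_pages(gfp, bytes, phys_limit);

		/* ERR_PTR(-EAGAIN): the pages landed above phys_limit; retry lower. */
		if (IS_ERR(page))
			page = alloc_dma_pages(gfp | __GFP_DMA32, bytes, phys_limit);

		return IS_ERR_OR_NULL(page) ? NULL : page;
	}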
600 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
602 * @bytes: Size of the buffer.
649 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
651 * @bytes: Size of the buffer.
665 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
673 * Allocate and initialize a new IO TLB memory pool. The actual number of
685 struct page *tlb; in swiotlb_alloc_pool() local
694 pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas); in swiotlb_alloc_pool()
698 pool->areas = (void *)pool + sizeof(*pool); in swiotlb_alloc_pool()
701 while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) { in swiotlb_alloc_pool()
709 slot_order = get_order(array_size(sizeof(*pool->slots), nslabs)); in swiotlb_alloc_pool()
710 pool->slots = (struct io_tlb_slot *) in swiotlb_alloc_pool()
712 if (!pool->slots) in swiotlb_alloc_pool()
715 swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas); in swiotlb_alloc_pool()
719 swiotlb_free_tlb(page_address(tlb), tlb_size); in swiotlb_alloc_pool()
727 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
737 default_nareas, mem->phys_limit, GFP_KERNEL); in swiotlb_dyn_alloc()
747 * swiotlb_dyn_free() - RCU callback to free a memory pool
753 size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs); in swiotlb_dyn_free()
754 size_t tlb_size = pool->end - pool->start; in swiotlb_dyn_free()
756 free_pages((unsigned long)pool->slots, get_order(slots_size)); in swiotlb_dyn_free()
757 swiotlb_free_tlb(pool->vaddr, tlb_size); in swiotlb_dyn_free()
762 * swiotlb_find_pool() - find the IO TLB pool for a physical address
766 * Find the IO TLB memory pool descriptor which contains the given physical
773 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_pool()
777 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_find_pool()
778 if (paddr >= pool->start && paddr < pool->end) in swiotlb_find_pool()
782 list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) { in swiotlb_find_pool()
783 if (paddr >= pool->start && paddr < pool->end) in swiotlb_find_pool()
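A minimal sketch of the lookup implied by the two loops above, covering only the per-allocator list (the per-device transient list is walked the same way); the rcu_read_lock() protection is an assumption here:

	static struct io_tlb_pool *find_pool_sketch(struct device *dev, phys_addr_t paddr)
	{
		struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
		struct io_tlb_pool *pool, *found = NULL;

		rcu_read_lock();
		list_for_each_entry_rcu(pool, &mem->pools, node) {
			if (paddr >= pool->start && paddr < pool->end) {
				found = pool;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}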
793 * swiotlb_del_pool() - remove an IO TLB pool from a device
801 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags); in swiotlb_del_pool()
802 list_del_rcu(&pool->node); in swiotlb_del_pool()
803 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags); in swiotlb_del_pool()
805 call_rcu(&pool->rcu, swiotlb_dyn_free); in swiotlb_del_pool()
811 * swiotlb_dev_init() - initialize swiotlb fields in &struct device
816 dev->dma_io_tlb_mem = &io_tlb_default_mem; in swiotlb_dev_init()
818 INIT_LIST_HEAD(&dev->dma_io_tlb_pools); in swiotlb_dev_init()
819 spin_lock_init(&dev->dma_io_tlb_lock); in swiotlb_dev_init()
820 dev->dma_uses_io_tlb = false; in swiotlb_dev_init()
829 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); in swiotlb_align_offset()
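A worked example of the offset computation, assuming IO_TLB_SIZE is 2 KiB and the device declares a 4 KiB min-align mask (0xfff):

	/*
	 * addr        = ...0x39a4
	 * & 0xfff     =    0x9a4    keep the bits the device needs preserved
	 * & 0x7ff     =    0x1a4    but only below the IO_TLB_SIZE granularity
	 *
	 * The bounce buffer address must therefore end in 0x1a4 within its slot.
	 */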
835 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, in swiotlb_bounce() argument
839 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
840 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
841 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
843 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; in swiotlb_bounce()
849 tlb_offset = tlb_addr & (IO_TLB_SIZE - 1); in swiotlb_bounce()
858 tlb_offset -= orig_addr_offset; in swiotlb_bounce()
861 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n", in swiotlb_bounce()
862 alloc_size, size, tlb_offset); in swiotlb_bounce()
867 alloc_size -= tlb_offset; in swiotlb_bounce()
869 if (size > alloc_size) { in swiotlb_bounce()
871 "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n", in swiotlb_bounce()
872 alloc_size, size); in swiotlb_bounce()
873 size = alloc_size; in swiotlb_bounce()
882 while (size) { in swiotlb_bounce()
883 sz = min_t(size_t, PAGE_SIZE - offset, size); in swiotlb_bounce()
893 size -= sz; in swiotlb_bounce()
899 memcpy(vaddr, phys_to_virt(orig_addr), size); in swiotlb_bounce()
901 memcpy(phys_to_virt(orig_addr), vaddr, size); in swiotlb_bounce()
920 if (index >= mem->area_nslabs) in wrap_area_index()
936 new_used = atomic_long_add_return(nslots, &mem->total_used); in inc_used_and_hiwater()
937 old_hiwater = atomic_long_read(&mem->used_hiwater); in inc_used_and_hiwater()
941 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater, in inc_used_and_hiwater()
947 atomic_long_sub(nslots, &mem->total_used); in dec_used()
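Putting the fragments above together, the high-water-mark update is a lock-free cmpxchg loop; a sketch reconstructed from the lines shown, not the verbatim source:

	static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
	{
		unsigned long old_hiwater, new_used;

		new_used = atomic_long_add_return(nslots, &mem->total_used);
		old_hiwater = atomic_long_read(&mem->used_hiwater);
		do {
			if (new_used <= old_hiwater)
				break;		/* no new maximum, nothing to record */
		} while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
						  &old_hiwater, new_used));
	}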
960 * swiotlb_search_pool_area() - search one memory area in one pool
963 * @area_index: Index of the IO TLB memory area to be searched.
964 * @orig_addr: Original (non-bounced) IO buffer address.
965 * @alloc_size: Total requested size of the bounce buffer,
969 * Find a suitable sequence of IO TLB entries for the request and allocate
970 * a buffer from the given IO TLB memory area.
973 * Return: Index of the first allocated slot, or -1 on error.
979 struct io_tlb_area *area = pool->areas + area_index; in swiotlb_search_pool_area()
982 phys_to_dma_unencrypted(dev, pool->start) & boundary_mask; in swiotlb_search_pool_area()
988 unsigned int index, slots_checked, count = 0, i; in swiotlb_search_pool_area() local
994 BUG_ON(area_index >= pool->nareas); in swiotlb_search_pool_area()
1002 iotlb_align_mask &= ~(IO_TLB_SIZE - 1); in swiotlb_search_pool_area()
1010 spin_lock_irqsave(&area->lock, flags); in swiotlb_search_pool_area()
1011 if (unlikely(nslots > pool->area_nslabs - area->used)) in swiotlb_search_pool_area()
1014 slot_base = area_index * pool->area_nslabs; in swiotlb_search_pool_area()
1015 index = area->index; in swiotlb_search_pool_area()
1017 for (slots_checked = 0; slots_checked < pool->area_nslabs; ) { in swiotlb_search_pool_area()
1031 if (pool->slots[slot_index].list >= nslots) in swiotlb_search_pool_area()
1039 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_search_pool_area()
1040 return -1; in swiotlb_search_pool_area()
1048 for (i = slot_index; i < slot_index + nslots; i++) { in swiotlb_search_pool_area()
1049 pool->slots[i].list = 0; in swiotlb_search_pool_area()
1050 pool->slots[i].alloc_size = alloc_size - (offset + in swiotlb_search_pool_area()
1051 ((i - slot_index) << IO_TLB_SHIFT)); in swiotlb_search_pool_area()
1053 for (i = slot_index - 1; in swiotlb_search_pool_area()
1054 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && in swiotlb_search_pool_area()
1055 pool->slots[i].list; i--) in swiotlb_search_pool_area()
1056 pool->slots[i].list = ++count; in swiotlb_search_pool_area()
1061 area->index = wrap_area_index(pool, index + nslots); in swiotlb_search_pool_area()
1062 area->used += nslots; in swiotlb_search_pool_area()
1063 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_search_pool_area()
1065 inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots); in swiotlb_search_pool_area()
1072 * swiotlb_search_area() - search one memory area in all pools
1076 * @orig_addr: Original (non-bounced) IO buffer address.
1077 * @alloc_size: Total requested size of the bounce buffer,
1085 * Return: Index of the first allocated slot, or -1 on error.
1091 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_search_area()
1094 int index = -1; in swiotlb_search_area()
1097 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_search_area()
1098 if (cpu_offset >= pool->nareas) in swiotlb_search_area()
1100 area_index = (start_cpu + cpu_offset) & (pool->nareas - 1); in swiotlb_search_area()
1114 * swiotlb_find_slots() - search for slots in the whole swiotlb
1116 * @orig_addr: Original (non-bounced) IO buffer address.
1117 * @alloc_size: Total requested size of the bounce buffer,
1122 * Search through the whole software IO TLB to find a sequence of slots that
1125 * Return: Index of the first allocated slot, or -1 on error.
1131 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots()
1136 int cpu, i; in swiotlb_find_slots() local
1140 return -1; in swiotlb_find_slots()
1143 for (i = 0; i < default_nareas; ++i) { in swiotlb_find_slots()
1144 index = swiotlb_search_area(dev, cpu, i, orig_addr, alloc_size, in swiotlb_find_slots()
1150 if (!mem->can_grow) in swiotlb_find_slots()
1151 return -1; in swiotlb_find_slots()
1153 schedule_work(&mem->dyn_alloc); in swiotlb_find_slots()
1156 phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit); in swiotlb_find_slots()
1160 return -1; in swiotlb_find_slots()
1165 swiotlb_dyn_free(&pool->rcu); in swiotlb_find_slots()
1166 return -1; in swiotlb_find_slots()
1169 pool->transient = true; in swiotlb_find_slots()
1170 spin_lock_irqsave(&dev->dma_io_tlb_lock, flags); in swiotlb_find_slots()
1171 list_add_rcu(&pool->node, &dev->dma_io_tlb_pools); in swiotlb_find_slots()
1172 spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags); in swiotlb_find_slots()
1175 WRITE_ONCE(dev->dma_uses_io_tlb, true); in swiotlb_find_slots()
1182 * First, the store to dev->dma_uses_io_tlb must be ordered before the in swiotlb_find_slots()
1184 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb. in swiotlb_find_slots()
1186 * Second, the load from mem->pools must be ordered before the same in swiotlb_find_slots()
1207 int start, i; in swiotlb_find_slots() local
1210 *retpool = pool = &dev->dma_io_tlb_mem->defpool; in swiotlb_find_slots()
1211 i = start = raw_smp_processor_id() & (pool->nareas - 1); in swiotlb_find_slots()
1213 index = swiotlb_search_pool_area(dev, pool, i, orig_addr, in swiotlb_find_slots()
1217 if (++i >= pool->nareas) in swiotlb_find_slots()
1218 i = 0; in swiotlb_find_slots()
1219 } while (i != start); in swiotlb_find_slots()
1220 return -1; in swiotlb_find_slots()
1228 * mem_used() - get number of used slots in an allocator
1229 * @mem: Software IO TLB allocator.
1238 return atomic_long_read(&mem->total_used); in mem_used()
1244 * mem_pool_used() - get number of used slots in a memory pool
1245 * @pool: Software IO TLB memory pool.
1253 int i; in mem_pool_used() local
1256 for (i = 0; i < pool->nareas; i++) in mem_pool_used()
1257 used += pool->areas[i].used; in mem_pool_used()
1262 * mem_used() - get number of used slots in an allocator
1263 * @mem: Software IO TLB allocator.
1277 list_for_each_entry_rcu(pool, &mem->pools, node) in mem_used()
1283 return mem_pool_used(&mem->defpool); in mem_used()
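A sketch of how the per-pool counts combine into the allocator-wide total when dynamic pools are enabled, reconstructed from the loop shown above; the RCU read-side locking is an assumption:

	static unsigned long mem_used_sketch(struct io_tlb_mem *mem)
	{
		struct io_tlb_pool *pool;
		unsigned long used = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(pool, &mem->pools, node)
			used += mem_pool_used(pool);
		rcu_read_unlock();
		return used;
	}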
1294 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single()
1297 unsigned int i; in swiotlb_tbl_map_single() local
1301 if (!mem || !mem->nslabs) { in swiotlb_tbl_map_single()
1318 if (index == -1) { in swiotlb_tbl_map_single()
1322 alloc_size, mem->nslabs, mem_used(mem)); in swiotlb_tbl_map_single()
1331 for (i = 0; i < nr_slots(alloc_size + offset); i++) in swiotlb_tbl_map_single()
1332 pool->slots[index + i].orig_addr = slot_addr(orig_addr, i); in swiotlb_tbl_map_single()
1333 tlb_addr = slot_addr(pool->start, index) + offset; in swiotlb_tbl_map_single()
1335 * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy in swiotlb_tbl_map_single()
1336 * the original buffer to the TLB buffer before initiating DMA in order in swiotlb_tbl_map_single()
1338 * i.e. if the device doesn't overwrite the entire buffer. Preserving in swiotlb_tbl_map_single()
1341 * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes. in swiotlb_tbl_map_single()
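The listing cuts off after this comment; in context the function then unconditionally bounces the original buffer into the new slots before returning, roughly as below (a sketch, not the verbatim lines):

	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;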
1352 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
1353 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
1354 int aindex = index / mem->area_nslabs; in swiotlb_release_slots()
1355 struct io_tlb_area *area = &mem->areas[aindex]; in swiotlb_release_slots()
1356 int count, i; in swiotlb_release_slots() local
1364 BUG_ON(aindex >= mem->nareas); in swiotlb_release_slots()
1366 spin_lock_irqsave(&area->lock, flags); in swiotlb_release_slots()
1368 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
1376 for (i = index + nslots - 1; i >= index; i--) { in swiotlb_release_slots()
1377 mem->slots[i].list = ++count; in swiotlb_release_slots()
1378 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
1379 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
1386 for (i = index - 1; in swiotlb_release_slots()
1387 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
1388 i--) in swiotlb_release_slots()
1389 mem->slots[i].list = ++count; in swiotlb_release_slots()
1390 area->used -= nslots; in swiotlb_release_slots()
1391 spin_unlock_irqrestore(&area->lock, flags); in swiotlb_release_slots()
1393 dec_used(dev->dma_io_tlb_mem, nslots); in swiotlb_release_slots()
1399 * swiotlb_del_transient() - delete a transient memory pool
1413 if (!pool->transient) in swiotlb_del_transient()
1416 dec_used(dev->dma_io_tlb_mem, pool->nslabs); in swiotlb_del_transient()
1451 size_t size, enum dma_data_direction dir) in swiotlb_sync_single_for_device() argument
1454 swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE); in swiotlb_sync_single_for_device()
1460 size_t size, enum dma_data_direction dir) in swiotlb_sync_single_for_cpu() argument
1463 swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE); in swiotlb_sync_single_for_cpu()
1472 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, in swiotlb_map() argument
1478 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size); in swiotlb_map()
1480 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir, in swiotlb_map()
1487 if (unlikely(!dma_capable(dev, dma_addr, size, true))) { in swiotlb_map()
1488 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, in swiotlb_map()
1492 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in swiotlb_map()
1497 arch_sync_dma_for_device(swiotlb_addr, size, dir); in swiotlb_map()
1508 * min align mask. This affects max mapping size. in swiotlb_max_mapping_size()
1514 return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align; in swiotlb_max_mapping_size()
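A worked example, assuming IO_TLB_SIZE is 2 KiB, IO_TLB_SEGSIZE is 128, and min_align is the device's min-align mask rounded up to a whole IO_TLB_SIZE:

	/*
	 * No min-align mask:     128 * 2 KiB       = 256 KiB max mapping
	 * 4 KiB mask (0xfff):    256 KiB - 4 KiB   = 252 KiB max mapping
	 */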
1518 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1527 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active()
1529 return mem && mem->nslabs; in is_swiotlb_active()
1533 * default_swiotlb_base() - get the base address of the default SWIOTLB
1535 * Get the lowest physical address used by the default software IO TLB pool.
1546 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1548 * Get the highest physical address used by the default software IO TLB pool.
1555 return io_tlb_default_mem.defpool.end - 1; in default_swiotlb_limit()
1573 *val = atomic_long_read(&mem->used_hiwater); in io_tlb_hiwater_get()
1583 return -EINVAL; in io_tlb_hiwater_set()
1585 atomic_long_set(&mem->used_hiwater, val); in io_tlb_hiwater_set()
1596 atomic_long_set(&mem->total_used, 0); in swiotlb_create_debugfs_files()
1597 atomic_long_set(&mem->used_hiwater, 0); in swiotlb_create_debugfs_files()
1599 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); in swiotlb_create_debugfs_files()
1600 if (!mem->nslabs) in swiotlb_create_debugfs_files()
1603 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
1604 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1606 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1629 struct page *swiotlb_alloc(struct device *dev, size_t size) in swiotlb_alloc() argument
1631 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc()
1639 index = swiotlb_find_slots(dev, 0, size, 0, &pool); in swiotlb_alloc()
1640 if (index == -1) in swiotlb_alloc()
1643 tlb_addr = slot_addr(pool->start, index); in swiotlb_alloc()
1648 bool swiotlb_free(struct device *dev, struct page *page, size_t size) in swiotlb_free() argument
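A minimal usage sketch of this allocation pair, in the style of a dma-direct caller backed by a restricted pool; the helper names and error handling are assumptions:

	/* Sketch: coherent allocation from a restricted swiotlb pool. */
	static void *restricted_alloc(struct device *dev, size_t size)
	{
		struct page *page = swiotlb_alloc(dev, size);

		return page ? page_address(page) : NULL;
	}

	static void restricted_free(struct device *dev, void *vaddr, size_t size)
	{
		struct page *page = virt_to_page(vaddr);

		/* swiotlb_free() returns false if the page is not from the pool. */
		if (!swiotlb_free(dev, page, size))
			__free_pages(page, get_order(size));
	}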
1663 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init()
1664 unsigned long nslabs = rmem->size >> IO_TLB_SHIFT; in rmem_swiotlb_device_init()
1666 /* Set the per-device IO TLB area count to one */ in rmem_swiotlb_device_init()
1669 if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) { in rmem_swiotlb_device_init()
1671 return -EINVAL; in rmem_swiotlb_device_init()
1684 return -ENOMEM; in rmem_swiotlb_device_init()
1685 pool = &mem->defpool; in rmem_swiotlb_device_init()
1687 pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL); in rmem_swiotlb_device_init()
1688 if (!pool->slots) { in rmem_swiotlb_device_init()
1690 return -ENOMEM; in rmem_swiotlb_device_init()
1693 pool->areas = kcalloc(nareas, sizeof(*pool->areas), in rmem_swiotlb_device_init()
1695 if (!pool->areas) { in rmem_swiotlb_device_init()
1696 kfree(pool->slots); in rmem_swiotlb_device_init()
1698 return -ENOMEM; in rmem_swiotlb_device_init()
1701 set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), in rmem_swiotlb_device_init()
1702 rmem->size >> PAGE_SHIFT); in rmem_swiotlb_device_init()
1703 swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs, in rmem_swiotlb_device_init()
1705 mem->force_bounce = true; in rmem_swiotlb_device_init()
1706 mem->for_alloc = true; in rmem_swiotlb_device_init()
1708 spin_lock_init(&mem->lock); in rmem_swiotlb_device_init()
1712 rmem->priv = mem; in rmem_swiotlb_device_init()
1714 swiotlb_create_debugfs_files(mem, rmem->name); in rmem_swiotlb_device_init()
1717 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
1725 dev->dma_io_tlb_mem = &io_tlb_default_mem; in rmem_swiotlb_device_release()
1735 unsigned long node = rmem->fdt_node; in rmem_swiotlb_setup()
1738 of_get_flat_dt_prop(node, "linux,cma-default", NULL) || in rmem_swiotlb_setup()
1739 of_get_flat_dt_prop(node, "linux,dma-default", NULL) || in rmem_swiotlb_setup()
1740 of_get_flat_dt_prop(node, "no-map", NULL)) in rmem_swiotlb_setup()
1741 return -EINVAL; in rmem_swiotlb_setup()
1743 rmem->ops = &rmem_swiotlb_ops; in rmem_swiotlb_setup()
1744 pr_info("Reserved memory: created restricted DMA pool at %pa, size %lu MiB\n", in rmem_swiotlb_setup()
1745 &rmem->base, (unsigned long)rmem->size / SZ_1M); in rmem_swiotlb_setup()
1749 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);