Lines Matching full:mem (identifier search over the Linux kernel's software IO TLB bounce-buffer code, kernel/dma/swiotlb.c)
228 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_print_info() local
230 if (!mem->nslabs) { in swiotlb_print_info()
231 pr_warn("No low mem\n"); in swiotlb_print_info()
235 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
236 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
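The pr_info() above converts the pool's slab count into a size: each slab is 1 << IO_TLB_SHIFT bytes (2 KiB, since the kernel headers define IO_TLB_SHIFT as 11), so shifting the count up by IO_TLB_SHIFT gives bytes and shifting down by 20 gives MB. A minimal userspace model of that arithmetic, with the constant taken from the kernel headers and a hypothetical slab count:

    #include <stdio.h>

    #define IO_TLB_SHIFT 11   /* slab size is 1 << 11 = 2 KiB in the kernel */

    int main(void)
    {
        unsigned long nslabs = 32768;   /* hypothetical pool: 32768 slabs */

        /* same conversion as swiotlb_print_info(): slabs -> bytes -> MB */
        printf("mapped %lu slabs = %luMB\n", nslabs,
               (nslabs << IO_TLB_SHIFT) >> 20);   /* prints 64MB */
        return 0;
    }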
257 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_update_mem_attributes() local
260 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
262 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
263 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT); in swiotlb_update_mem_attributes()
266 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start, in swiotlb_init_io_tlb_pool() argument
272 mem->nslabs = nslabs; in swiotlb_init_io_tlb_pool()
273 mem->start = start; in swiotlb_init_io_tlb_pool()
274 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_pool()
275 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_pool()
276 mem->nareas = nareas; in swiotlb_init_io_tlb_pool()
277 mem->area_nslabs = nslabs / mem->nareas; in swiotlb_init_io_tlb_pool()
279 for (i = 0; i < mem->nareas; i++) { in swiotlb_init_io_tlb_pool()
280 spin_lock_init(&mem->areas[i].lock); in swiotlb_init_io_tlb_pool()
281 mem->areas[i].index = 0; in swiotlb_init_io_tlb_pool()
282 mem->areas[i].used = 0; in swiotlb_init_io_tlb_pool()
285 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_pool()
286 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i), in swiotlb_init_io_tlb_pool()
287 mem->nslabs - i); in swiotlb_init_io_tlb_pool()
288 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_pool()
289 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_pool()
293 mem->vaddr = vaddr; in swiotlb_init_io_tlb_pool()
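The slot-initialization loop above seeds the allocator's free lists: slots[i].list records how many contiguous free slots follow slot i before the next IO_TLB_SEGSIZE boundary (or the end of the pool), so the search path can skip over whole free runs. A small userspace sketch of the same descending-count pattern, assuming the kernel's IO_TLB_SEGSIZE of 128 and mirroring its io_tlb_offset() helper:

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128   /* slots per segment, as in the kernel */

    /* offset of slot i within its segment; mirrors the kernel helper */
    static unsigned long io_tlb_offset(unsigned long i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    int main(void)
    {
        unsigned long nslabs = 300;   /* hypothetical small pool */
        unsigned int list[300];

        /* Same formula as swiotlb_init_io_tlb_pool(): each slot counts the
         * free slots left in its segment, clamped at the end of the pool. */
        for (unsigned long i = 0; i < nslabs; i++) {
            unsigned long seg_left  = IO_TLB_SEGSIZE - io_tlb_offset(i);
            unsigned long pool_left = nslabs - i;

            list[i] = seg_left < pool_left ? seg_left : pool_left;
        }

        printf("list[0]=%u list[127]=%u list[128]=%u list[299]=%u\n",
               list[0], list[127], list[128], list[299]);   /* 128 1 128 1 */
        return 0;
    }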
299 * @mem: Software IO TLB allocator.
302 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool) in add_mem_pool() argument
305 spin_lock(&mem->lock); in add_mem_pool()
306 list_add_rcu(&pool->node, &mem->pools); in add_mem_pool()
307 mem->nslabs += pool->nslabs; in add_mem_pool()
308 spin_unlock(&mem->lock); in add_mem_pool()
310 mem->nslabs = pool->nslabs; in add_mem_pool()
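add_mem_pool() publishes a new pool on the RCU-protected mem->pools list under mem->lock, so the mapping fast path can walk the list without taking a lock; the bare `mem->nslabs = pool->nslabs` hit is the non-CONFIG_SWIOTLB_DYNAMIC branch, where the default pool is the only pool. The core of the RCU publish step is release/acquire ordering, which this self-contained C11 model illustrates (a sketch of the ordering only, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct pool {
        unsigned long nslabs;
        struct pool *next;   /* immutable once the node is published */
    };

    static _Atomic(struct pool *) pools;   /* list head */
    static unsigned long total_nslabs;     /* writer-side aggregate */

    /* Writer: fully initialize the node, then make it reachable with a
     * release store, mirroring add_mem_pool()'s list_add_rcu(). */
    static void add_pool(struct pool *p)
    {
        p->next = atomic_load_explicit(&pools, memory_order_relaxed);
        atomic_store_explicit(&pools, p, memory_order_release);
        total_nslabs += p->nslabs;   /* the kernel updates this under mem->lock */
    }

    /* Reader: the acquire load pairs with the release store, so every
     * field of the published node is visible; this stands in for
     * list_for_each_entry_rcu() on mem->pools. */
    static unsigned long sum_pools(void)
    {
        unsigned long sum = 0;

        for (struct pool *p = atomic_load_explicit(&pools, memory_order_acquire);
             p; p = p->next)
            sum += p->nslabs;
        return sum;
    }

    int main(void)
    {
        struct pool a = { .nslabs = 2048 }, b = { .nslabs = 1024 };

        add_pool(&a);
        add_pool(&b);
        printf("total=%lu sum=%lu\n", total_nslabs, sum_pools()); /* 3072 3072 */
        return 0;
    }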
353 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_init_remap() local
394 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
395 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
396 if (!mem->slots) { in swiotlb_init_remap()
402 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), in swiotlb_init_remap()
404 if (!mem->areas) { in swiotlb_init_remap()
405 pr_warn("%s: Failed to allocate mem->areas.\n", __func__); in swiotlb_init_remap()
409 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas); in swiotlb_init_remap()
410 add_mem_pool(&io_tlb_default_mem, mem); in swiotlb_init_remap()
429 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_init_late() local
494 area_order = get_order(array_size(sizeof(*mem->areas), nareas)); in swiotlb_init_late()
495 mem->areas = (struct io_tlb_area *) in swiotlb_init_late()
497 if (!mem->areas) in swiotlb_init_late()
500 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
501 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
502 if (!mem->slots) in swiotlb_init_late()
507 swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true, in swiotlb_init_late()
509 add_mem_pool(&io_tlb_default_mem, mem); in swiotlb_init_late()
515 free_pages((unsigned long)mem->areas, area_order); in swiotlb_init_late()
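swiotlb_init_late() allocates the areas and slots arrays with __get_free_pages(), so sizes must be expressed as a page order; the error path above (and later swiotlb_exit()) recomputes the same order to free exactly what was allocated. A userspace model of that order arithmetic, with a hypothetical per-slot size:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Model of the kernel's get_order(): the smallest n such that
     * (PAGE_SIZE << n) >= size. */
    static int get_order_model(unsigned long size)
    {
        int order = 0;
        unsigned long span = PAGE_SIZE;

        while (span < size) {
            span <<= 1;
            order++;
        }
        return order;
    }

    int main(void)
    {
        /* e.g. a slots array for 32768 slabs, assuming ~24 bytes per slot */
        unsigned long slots_size = 32768UL * 24;
        int order = get_order_model(slots_size);

        printf("slots_size=%lu -> order %d (%lu bytes allocated)\n",
               slots_size, order, PAGE_SIZE << order);
        return 0;
    }

Because the buddy allocator hands out power-of-two numbers of pages, allocation and free must agree on the order, which is why both sides call get_order() on the same array_size() expression.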
523 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_exit() local
531 if (!mem->nslabs) in swiotlb_exit()
535 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
536 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
537 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
540 if (mem->late_alloc) { in swiotlb_exit()
541 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_exit()
542 mem->nareas)); in swiotlb_exit()
543 free_pages((unsigned long)mem->areas, area_order); in swiotlb_exit()
545 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
547 memblock_free_late(__pa(mem->areas), in swiotlb_exit()
548 array_size(sizeof(*mem->areas), mem->nareas)); in swiotlb_exit()
549 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
550 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
553 memset(mem, 0, sizeof(*mem)); in swiotlb_exit()
732 struct io_tlb_mem *mem = in swiotlb_dyn_alloc() local
737 default_nareas, mem->phys_limit, GFP_KERNEL); in swiotlb_dyn_alloc()
743 add_mem_pool(mem, pool); in swiotlb_dyn_alloc()
773 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_pool() local
777 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_find_pool()
838 struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr); in swiotlb_bounce() local
839 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
840 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
841 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
843 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; in swiotlb_bounce()
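swiotlb_bounce() recovers the bookkeeping slot from a bounce address by pure arithmetic: the byte offset from mem->start shifted down by IO_TLB_SHIFT is the slot index, and the same byte offset applied to mem->vaddr locates the data to copy. A tiny model with hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define IO_TLB_SHIFT 11   /* 2 KiB slabs, as in the kernel headers */

    int main(void)
    {
        uint64_t start    = 0x80000000;   /* hypothetical pool start */
        uint64_t tlb_addr = 0x80005800;   /* bounce address inside the pool */

        /* Same arithmetic as swiotlb_bounce(): byte offset -> slot index. */
        uint64_t offset = tlb_addr - start;
        unsigned int index = (unsigned int)(offset >> IO_TLB_SHIFT);

        printf("offset=%llu bytes -> slot index %u\n",
               (unsigned long long)offset, index);   /* 22528 -> 11 */
        return 0;
    }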
918 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index) in wrap_area_index() argument
920 if (index >= mem->area_nslabs) in wrap_area_index()
932 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) in inc_used_and_hiwater() argument
936 new_used = atomic_long_add_return(nslots, &mem->total_used); in inc_used_and_hiwater()
937 old_hiwater = atomic_long_read(&mem->used_hiwater); in inc_used_and_hiwater()
941 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater, in inc_used_and_hiwater()
945 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_used() argument
947 atomic_long_sub(nslots, &mem->total_used); in dec_used()
951 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) in inc_used_and_hiwater() argument
954 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_used() argument
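inc_used_and_hiwater() maintains a lock-free high-water mark: after bumping total_used, it lifts used_hiwater with a compare-and-swap loop in which atomic_long_try_cmpxchg() reloads the observed value on failure. The empty stubs at the end are the !CONFIG_DEBUG_FS build, where the counters compile away. The same pattern in portable C11 atomics (a model, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long total_used;
    static atomic_long used_hiwater;

    /* Bump the usage counter, then raise the high-water mark.  On CAS
     * failure, atomic_compare_exchange_weak() rewrites old_hiwater with
     * the current value, just like the kernel's try_cmpxchg. */
    static void inc_used_and_hiwater(long nslots)
    {
        long new_used = atomic_fetch_add(&total_used, nslots) + nslots;
        long old_hiwater = atomic_load(&used_hiwater);

        do {
            if (new_used <= old_hiwater)
                break;   /* another CPU already recorded a higher mark */
        } while (!atomic_compare_exchange_weak(&used_hiwater,
                                               &old_hiwater, new_used));
    }

    static void dec_used(long nslots)
    {
        atomic_fetch_sub(&total_used, nslots);
    }

    int main(void)
    {
        inc_used_and_hiwater(8);
        inc_used_and_hiwater(4);
        dec_used(8);
        printf("used=%ld hiwater=%ld\n",   /* used=4 hiwater=12 */
               atomic_load(&total_used), atomic_load(&used_hiwater));
        return 0;
    }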
1091 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_search_area() local
1097 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_search_area()
1131 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots() local
1150 if (!mem->can_grow) in swiotlb_find_slots()
1153 schedule_work(&mem->dyn_alloc); in swiotlb_find_slots()
1186 * Second, the load from mem->pools must be ordered before the same in swiotlb_find_slots()
1229 * @mem: Software IO TLB allocator.
1236 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
1238 return atomic_long_read(&mem->total_used); in mem_used()
1263 * @mem: Software IO TLB allocator.
1270 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
1277 list_for_each_entry_rcu(pool, &mem->pools, node) in mem_used()
1283 return mem_pool_used(&mem->defpool); in mem_used()
1294 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single() local
1301 if (!mem || !mem->nslabs) { in swiotlb_tbl_map_single()
1322 alloc_size, mem->nslabs, mem_used(mem)); in swiotlb_tbl_map_single()
1349 struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr); in swiotlb_release_slots() local
1352 int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
1353 int nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
1354 int aindex = index / mem->area_nslabs; in swiotlb_release_slots()
1355 struct io_tlb_area *area = &mem->areas[aindex]; in swiotlb_release_slots()
1364 BUG_ON(aindex >= mem->nareas); in swiotlb_release_slots()
1368 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
1377 mem->slots[i].list = ++count; in swiotlb_release_slots()
1378 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
1379 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
1387 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
1389 mem->slots[i].list = ++count; in swiotlb_release_slots()
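The two loops in swiotlb_release_slots() rebuild the contiguous-free counts: the first walks the freed range backwards, assigning ascending counts seeded from the slot just past the range; the second continues into any free slots immediately before the range (stopping at a segment boundary or a used slot), so adjacent free runs merge. A compact userspace model of a single segment:

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128

    static unsigned int list[IO_TLB_SEGSIZE];   /* contiguous-free counts */

    /* Model of the free path in swiotlb_release_slots(): restore counts
     * over the freed range, then extend backwards to merge with the
     * preceding free run in the same segment. */
    static void release(int index, int nslots)
    {
        int count = (index + nslots < IO_TLB_SEGSIZE)
                        ? (int)list[index + nslots] : 0;
        int i;

        for (i = index + nslots - 1; i >= index; i--)
            list[i] = ++count;
        for (i = index - 1;
             i >= 0 && (i & (IO_TLB_SEGSIZE - 1)) != IO_TLB_SEGSIZE - 1 &&
             list[i];
             i--)
            list[i] = ++count;
    }

    int main(void)
    {
        int i;

        /* segment state after slots 4..11 were allocated: 0..3 free
         * (counts 4..1), 4..11 used (count 0), 12.. free (116..1) */
        for (i = 0; i < 4; i++)
            list[i] = 4 - i;
        for (i = 4; i < 12; i++)
            list[i] = 0;
        for (i = 12; i < IO_TLB_SEGSIZE; i++)
            list[i] = IO_TLB_SEGSIZE - i;

        release(8, 4);   /* free the later block first */
        release(4, 4);   /* freeing the earlier block merges both runs */

        printf("list[3]=%u list[4]=%u list[11]=%u\n",
               list[3], list[4], list[11]);   /* 125 124 117 */
        return 0;
    }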
1527 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active() local
1529 return mem && mem->nslabs; in is_swiotlb_active()
1563 struct io_tlb_mem *mem = data; in io_tlb_used_get() local
1565 *val = mem_used(mem); in io_tlb_used_get()
1571 struct io_tlb_mem *mem = data; in io_tlb_hiwater_get() local
1573 *val = atomic_long_read(&mem->used_hiwater); in io_tlb_hiwater_get()
1579 struct io_tlb_mem *mem = data; in io_tlb_hiwater_set() local
1585 atomic_long_set(&mem->used_hiwater, val); in io_tlb_hiwater_set()
1593 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
1596 atomic_long_set(&mem->total_used, 0); in swiotlb_create_debugfs_files()
1597 atomic_long_set(&mem->used_hiwater, 0); in swiotlb_create_debugfs_files()
1599 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); in swiotlb_create_debugfs_files()
1600 if (!mem->nslabs) in swiotlb_create_debugfs_files()
1603 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
1604 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1606 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1620 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
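With CONFIG_DEBUG_FS enabled, these hooks expose the counters as files: the default pool's directory sits under the debugfs root, and per-device restricted pools get their own subdirectory named after the reserved-memory node (the debugfs_create_dir() hit above). A small userspace reader, assuming debugfs is mounted at /sys/kernel/debug and the default "swiotlb" directory name:

    #include <stdio.h>

    /* Read the counters that swiotlb_create_debugfs_files() exposes.
     * Paths are assumptions: debugfs at /sys/kernel/debug, default dir
     * "swiotlb"; restricted pools appear under their own subdirectory. */
    int main(void)
    {
        const char *files[] = {
            "/sys/kernel/debug/swiotlb/io_tlb_nslabs",
            "/sys/kernel/debug/swiotlb/io_tlb_used",
            "/sys/kernel/debug/swiotlb/io_tlb_used_hiwater",
        };

        for (int i = 0; i < 3; i++) {
            FILE *f = fopen(files[i], "r");
            unsigned long long val;

            if (!f || fscanf(f, "%llu", &val) != 1) {
                perror(files[i]);
                if (f)
                    fclose(f);
                continue;
            }
            printf("%s = %llu\n", files[i], val);
            fclose(f);
        }
        return 0;
    }

Note the 0600 mode on io_tlb_used_hiwater: unlike the read-only counters, the high-water mark can be reset by writing 0 to it (io_tlb_hiwater_set() rejects any nonzero value).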
1631 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc() local
1636 if (!mem) in swiotlb_alloc()
1663 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init() local
1679 if (!mem) { in rmem_swiotlb_device_init()
1682 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in rmem_swiotlb_device_init()
1683 if (!mem) in rmem_swiotlb_device_init()
1685 pool = &mem->defpool; in rmem_swiotlb_device_init()
1689 kfree(mem); in rmem_swiotlb_device_init()
1697 kfree(mem); in rmem_swiotlb_device_init()
1705 mem->force_bounce = true; in rmem_swiotlb_device_init()
1706 mem->for_alloc = true; in rmem_swiotlb_device_init()
1708 spin_lock_init(&mem->lock); in rmem_swiotlb_device_init()
1710 add_mem_pool(mem, pool); in rmem_swiotlb_device_init()
1712 rmem->priv = mem; in rmem_swiotlb_device_init()
1714 swiotlb_create_debugfs_files(mem, rmem->name); in rmem_swiotlb_device_init()
1717 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()