Lines Matching +full:i +full:- +full:tlb +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
6 * I/O TLBs (aka DMA address translation hardware).
9 * Copyright (C) 2000, 2003 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
12 * 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
14 * unnecessary i-cache flushing.
21 #define pr_fmt(fmt) "software IO TLB: " fmt
24 #include <linux/dma-direct.h>
25 #include <linux/dma-map-ops.h>
48 #include <linux/iommu-helper.h>
54 ( (val) & ( (align) - 1)))
56 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
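These matches are from the software IO TLB (swiotlb) bounce-buffer implementation in kernel/dma/swiotlb.c. The pool it manages is carved into fixed-size slabs, and a single mapping is always confined to one aligned group of IO_TLB_SEGSIZE slabs. A minimal userspace sketch of that arithmetic, assuming the values used by include/linux/swiotlb.h at the time of this listing (IO_TLB_SHIFT = 11, IO_TLB_SEGSIZE = 128) and 4 KiB pages:

    #include <stdio.h>

    /* Assumed constants, mirroring include/linux/swiotlb.h for this era:
     * each slab is 1 << IO_TLB_SHIFT bytes, and a single mapping never
     * spans more than IO_TLB_SEGSIZE slabs. */
    #define IO_TLB_SHIFT   11                 /* 2 KiB slabs */
    #define IO_TLB_SEGSIZE 128                /* 256 KiB per segment */
    #define PAGE_SHIFT     12                 /* assuming 4 KiB pages */
    #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

    int main(void)
    {
            printf("slab size     : %d bytes\n", 1 << IO_TLB_SHIFT);
            printf("slabs per page: %d\n", SLABS_PER_PAGE);
            printf("segment size  : %d bytes\n", IO_TLB_SEGSIZE << IO_TLB_SHIFT);
            return 0;
    }

With the default 64 MB pool (IO_TLB_DEFAULT_SIZE) that works out to 32768 slabs, which is the value swiotlb_size_or_default() below converts back to bytes.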
59 * Minimum IO TLB size to bother booting with. Systems with mainly
75 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
81 * The number of used IO TLB blocks
117 /* avoid tail segment of size < IO_TLB_SEGSIZE */ in setup_io_tlb_npages()
159 unsigned long size; in swiotlb_size_or_default() local
161 size = io_tlb_nslabs << IO_TLB_SHIFT; in swiotlb_size_or_default()
163 return size ? size : (IO_TLB_DEFAULT_SIZE); in swiotlb_size_or_default()
175 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &io_tlb_start, &io_tlb_end, in swiotlb_print_info()
199 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) in swiotlb_init_with_tbl() argument
201 unsigned long i, bytes; in swiotlb_init_with_tbl() local
207 io_tlb_start = __pa(tlb); in swiotlb_init_with_tbl()
212 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE in swiotlb_init_with_tbl()
227 for (i = 0; i < io_tlb_nslabs; i++) { in swiotlb_init_with_tbl()
228 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); in swiotlb_init_with_tbl()
229 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; in swiotlb_init_with_tbl()
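The loop at lines 227-229 (and the identical one at lines 361-363 in swiotlb_late_init_with_tbl() further down) seeds the free list so that slot i advertises how many contiguous free slots remain from i to the end of its IO_TLB_SEGSIZE-aligned segment; this is what lets the allocator later decide from a single io_tlb_list[index] lookup whether a run fits. A scaled-down userspace sketch of the same pattern, using a hypothetical segment size of 8 for readability:

    #include <stdio.h>

    #define SEGSIZE 8      /* scaled down from IO_TLB_SEGSIZE (128) for the demo */
    #define NSLABS  16
    #define OFFSET(val, align) ((val) & ((align) - 1))

    int main(void)
    {
            int list[NSLABS];
            int i;

            /* Same pattern as the loop above: slot i starts out advertising
             * every slot from i up to the end of its segment as free. */
            for (i = 0; i < NSLABS; i++)
                    list[i] = SEGSIZE - OFFSET(i, SEGSIZE);

            for (i = 0; i < NSLABS; i++)
                    printf("slot %2d: %d free slots to segment end\n", i, list[i]);
            return 0;
    }

With the real constants the counts run 128, 127, ..., 1 and restart at every 128-slot boundary.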
243 * structures for the software IO TLB used to implement the DMA API.
259 /* Get IO TLB memory from the low pages */ in swiotlb_init()
292 * Get IO TLB memory from the low pages in swiotlb_late_init_with_default_size()
303 order--; in swiotlb_late_init_with_default_size()
308 return -ENOMEM; in swiotlb_late_init_with_default_size()
331 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) in swiotlb_late_init_with_tbl() argument
333 unsigned long i, bytes; in swiotlb_late_init_with_tbl() local
338 io_tlb_start = virt_to_phys(tlb); in swiotlb_late_init_with_tbl()
341 set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT); in swiotlb_late_init_with_tbl()
342 memset(tlb, 0, bytes); in swiotlb_late_init_with_tbl()
346 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE in swiotlb_late_init_with_tbl()
361 for (i = 0; i < io_tlb_nslabs; i++) { in swiotlb_late_init_with_tbl()
362 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); in swiotlb_late_init_with_tbl()
363 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; in swiotlb_late_init_with_tbl()
382 return -ENOMEM; in swiotlb_late_init_with_tbl()
412 size_t size, enum dma_data_direction dir) in swiotlb_bounce() argument
424 while (size) { in swiotlb_bounce()
425 sz = min_t(size_t, PAGE_SIZE - offset, size); in swiotlb_bounce()
436 size -= sz; in swiotlb_bounce()
442 memcpy(vaddr, phys_to_virt(orig_addr), size); in swiotlb_bounce()
444 memcpy(phys_to_virt(orig_addr), vaddr, size); in swiotlb_bounce()
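swiotlb_bounce() is the copy between the caller's original buffer and its slots in the pool. When the original buffer lives in highmem it cannot simply be addressed through phys_to_virt(), so the copy is chunked so that no step crosses a page boundary (the while (size) loop at lines 424-436); otherwise one of the plain memcpy() calls at lines 442/444 moves the whole buffer at once. A minimal sketch of just the chunking arithmetic, with the per-page mapping details left out:

    #include <string.h>

    #define PAGE_SIZE 4096UL   /* assumed 4 KiB pages for the sketch */

    /* Sketch of the per-page chunking done for highmem buffers: never copy
     * past the next page boundary in one step.  The real code also maps
     * each source page and flips direction for DMA_TO_DEVICE versus
     * DMA_FROM_DEVICE. */
    static void bounce_chunked(char *bounce, const char *orig,
                               unsigned long offset, size_t size)
    {
            while (size) {
                    size_t sz = PAGE_SIZE - offset;

                    if (sz > size)
                            sz = size;
                    memcpy(bounce, orig, sz);
                    bounce += sz;
                    orig += sz;
                    size -= sz;
                    offset = 0;   /* subsequent chunks start page-aligned */
            }
    }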
456 int i; in swiotlb_tbl_map_single() local
485 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); in swiotlb_tbl_map_single()
489 * (and hence alignment) to a page size. in swiotlb_tbl_map_single()
493 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); in swiotlb_tbl_map_single()
500 * Find suitable number of IO TLB entries size that will fit this in swiotlb_tbl_map_single()
501 * request and allocate a buffer from that IO TLB pool. in swiotlb_tbl_map_single()
505 if (unlikely(nslots > io_tlb_nslabs - io_tlb_used)) in swiotlb_tbl_map_single()
531 for (i = index; i < (int) (index + nslots); i++) in swiotlb_tbl_map_single()
532 io_tlb_list[i] = 0; in swiotlb_tbl_map_single()
533 for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) in swiotlb_tbl_map_single()
534 io_tlb_list[i] = ++count; in swiotlb_tbl_map_single()
568 for (i = 0; i < nslots; i++) in swiotlb_tbl_map_single()
569 io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); in swiotlb_tbl_map_single()
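Lines 456-569 are the allocator itself, swiotlb_tbl_map_single(): the request is rounded up to nslots slabs, the search stride is raised to a whole page for mappings of a page or more (lines 489-493), and the search bails out early if fewer than nslots slots are free in total (line 505). Once a slot whose free count covers the request is found, the run is claimed (lines 531-534) and the original address of every slab is recorded (lines 568-569) so later bounces and the unmap know where to copy. A userspace sketch of the claim step, with a hypothetical helper name and a power-of-two segment size assumed:

    /* Sketch of the bookkeeping at lines 531-534: mark the chosen run as in
     * use (count 0), then walk backwards fixing up the free counts of the
     * slots that precede it in the same segment so they no longer claim the
     * area just allocated. */
    static void claim_slots(int *list, int index, int nslots, int segsize)
    {
            int count = 0;
            int i;

            for (i = index; i < index + nslots; i++)
                    list[i] = 0;
            for (i = index - 1;
                 i >= 0 && (i & (segsize - 1)) != segsize - 1 && list[i]; i--)
                    list[i] = ++count;
    }

After a claim, each still-free slot ahead of the run reports only the free slots between itself and the new allocation, so the invariant set up at init time is preserved.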
585 int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; in swiotlb_tbl_unmap_single() local
586 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; in swiotlb_tbl_unmap_single()
611 for (i = index + nslots - 1; i >= index; i--) { in swiotlb_tbl_unmap_single()
612 io_tlb_list[i] = ++count; in swiotlb_tbl_unmap_single()
613 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; in swiotlb_tbl_unmap_single()
619 for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) in swiotlb_tbl_unmap_single()
620 io_tlb_list[i] = ++count; in swiotlb_tbl_unmap_single()
622 io_tlb_used -= nslots; in swiotlb_tbl_unmap_single()
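swiotlb_tbl_unmap_single() reverses the claim: lines 611-613 hand the slots back while resetting their recorded original addresses, lines 619-620 merge the freed run with any free slots immediately preceding it, and line 622 updates the usage counter. The count starts from the free count of the slot just past the freed range, so the whole merged run ends up with consistent values. A sketch that pairs with claim_slots() above (it relies on the invariant that a mapping never crosses a segment boundary):

    /* Sketch of the release path: 'count' starts as the number of free
     * slots that follow the freed range within the same segment (zero if
     * the range ends at a segment boundary), the freed slots are rewritten
     * back to front, and the preceding free run is then merged in. */
    static void release_slots(int *list, int index, int nslots, int segsize)
    {
            int count;
            int i;

            if ((index + nslots) & (segsize - 1))
                    count = list[index + nslots];
            else
                    count = 0;

            for (i = index + nslots - 1; i >= index; i--)
                    list[i] = ++count;
            for (i = index - 1;
                 i >= 0 && (i & (segsize - 1)) != segsize - 1 && list[i]; i--)
                    list[i] = ++count;
    }

In the kernel both steps run with the pool's spinlock held, since swiotlb_tbl_map_single() can race with the release from another CPU.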
628 size_t size, enum dma_data_direction dir, in swiotlb_tbl_sync_single() argument
631 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; in swiotlb_tbl_sync_single()
636 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1); in swiotlb_tbl_sync_single()
642 size, DMA_FROM_DEVICE); in swiotlb_tbl_sync_single()
649 size, DMA_TO_DEVICE); in swiotlb_tbl_sync_single()
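swiotlb_tbl_sync_single() picks the bounce direction from the sync target and the DMA direction: a sync for the CPU copies the device's writes out of the pool (DMA_FROM_DEVICE or DMA_BIDIRECTIONAL, line 642), while a sync for the device copies the CPU's writes into it (DMA_TO_DEVICE or DMA_BIDIRECTIONAL, line 649). A standalone sketch of just that decision, with enum values local to the sketch rather than the kernel's:

    #include <stdio.h>

    enum dma_dir { DIR_BIDIRECTIONAL, DIR_TO_DEVICE, DIR_FROM_DEVICE };
    enum sync_target { SYNC_FOR_CPU, SYNC_FOR_DEVICE };

    /* Which way the sync bounces data: the CPU needs the device's writes
     * before it reads, and the device needs the CPU's writes before it
     * reads. */
    static const char *sync_action(enum sync_target t, enum dma_dir dir)
    {
            if (t == SYNC_FOR_CPU &&
                (dir == DIR_FROM_DEVICE || dir == DIR_BIDIRECTIONAL))
                    return "bounce buffer -> original buffer";
            if (t == SYNC_FOR_DEVICE &&
                (dir == DIR_TO_DEVICE || dir == DIR_BIDIRECTIONAL))
                    return "original buffer -> bounce buffer";
            return "no copy";
    }

    int main(void)
    {
            printf("%s\n", sync_action(SYNC_FOR_CPU, DIR_FROM_DEVICE));
            printf("%s\n", sync_action(SYNC_FOR_DEVICE, DIR_TO_DEVICE));
            return 0;
    }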
662 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, in swiotlb_map() argument
668 trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size, in swiotlb_map()
671 swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir, in swiotlb_map()
678 if (unlikely(!dma_capable(dev, dma_addr, size, true))) { in swiotlb_map()
679 swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir, in swiotlb_map()
683 &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); in swiotlb_map()
688 arch_sync_dma_for_device(swiotlb_addr, size, dir); in swiotlb_map()
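Finally, swiotlb_map() (lines 662-688) is what the dma-direct code falls back to when a device cannot address a buffer directly: the data is bounced into the pool, the resulting bounce address is re-checked with dma_capable() and unmapped again if even that is out of reach, and caches are synced for the device on non-coherent architectures. None of this is visible to a driver, which only ever uses the ordinary streaming-DMA calls. A hedged kernel-style sketch of that driver-side view (the device, buffer and length are placeholders, not taken from this file):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Driver-side view of the bounce path: if 'buf' is outside the device's
     * DMA mask, dma_map_single() transparently lands in swiotlb_map() above
     * and the returned handle points at the bounce buffer, not at 'buf'. */
    static int example_dma_write(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... program the hardware with 'handle' and wait for it ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }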