Lines Matching +full:i +full:- +full:tlb +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/dma-map-ops.h>
15 #include <linux/iommu-helper.h>
43 return (paddr >> (PAGE_SHIFT-1)) | 1; in mk_iommu_pte()
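
The mk_iommu_pte() line above is the whole PTE format: shifting the physical address right by PAGE_SHIFT-1 leaves the page frame number starting at bit 1, and the OR with 1 sets bit 0 as the valid bit. Below is a minimal userspace sketch of that encoding and its inverse, assuming Alpha's 8 KB pages (PAGE_SHIFT == 13); the helper names are illustrative, not the kernel's.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 13                    /* Alpha uses 8 KB pages */

/* Encode as in the fragment: PFN starting at bit 1, valid bit in bit 0. */
static unsigned long mk_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT - 1)) | 1;
}

/* Inverse: strip the valid bit and shift the PFN back up. */
static unsigned long pte_to_paddr(unsigned long pte)
{
	return (pte >> 1) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long paddr = 0x12340000UL;      /* already 8 KB aligned */
	unsigned long pte = mk_pte(paddr);

	printf("paddr %#lx -> pte %#lx\n", paddr, pte);
	assert(pte & 1);
	assert(pte_to_paddr(pte) == paddr);
	return 0;
}
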
67 /* Note that the TLB lookup logic uses bitwise concatenation, in iommu_arena_new_node()
69 the size of the window. Retain the align parameter so that in iommu_arena_new_node()
70 particular systems can over-align the arena. */ in iommu_arena_new_node()
78 arena->ptes = memblock_alloc(mem_size, align); in iommu_arena_new_node()
79 if (!arena->ptes) in iommu_arena_new_node()
83 spin_lock_init(&arena->lock); in iommu_arena_new_node()
84 arena->hose = hose; in iommu_arena_new_node()
85 arena->dma_base = base; in iommu_arena_new_node()
86 arena->size = window_size; in iommu_arena_new_node()
87 arena->next_entry = 0; in iommu_arena_new_node()
89 /* Align allocations to a multiple of a page size. Not needed in iommu_arena_new_node()
91 arena->align_entry = 1; in iommu_arena_new_node()
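
The iommu_arena_new_node() lines show the state the rest of the file works with: a flat PTE array covering the DMA window, the window's bus base address and size, a rotating next_entry cursor, and an align_entry granule (one page unless a chip bug requires more). Below is a userspace model of that bookkeeping, as a sketch only; the kernel's struct pci_iommu_arena also carries the hose pointer and spinlock visible in the fragments, which are omitted here.

#include <stdlib.h>

#define PAGE_SHIFT 13                    /* assumption: Alpha's 8 KB pages */

struct arena_model {
	unsigned long *ptes;         /* one entry per IOMMU page in the window */
	unsigned long dma_base;      /* bus address the window starts at */
	unsigned long size;          /* window size in bytes */
	unsigned long next_entry;    /* rotating allocation cursor */
	unsigned long align_entry;   /* allocation granule, in pages */
};

static struct arena_model *arena_new(unsigned long base, unsigned long window_size)
{
	struct arena_model *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	/* One PTE per page of the window, zero-initialized, i.e. all free. */
	a->ptes = calloc(window_size >> PAGE_SHIFT, sizeof(*a->ptes));
	if (!a->ptes) {
		free(a);
		return NULL;
	}
	a->dma_base = base;
	a->size = window_size;
	a->next_entry = 0;
	a->align_entry = 1;          /* page granularity unless chip bugs demand more */
	return a;
}
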
109 long i, p, nent; in iommu_arena_find_pages() local
114 base = arena->dma_base >> PAGE_SHIFT; in iommu_arena_find_pages()
117 /* Search forward for the first mask-aligned sequence of N free ptes */ in iommu_arena_find_pages()
118 ptes = arena->ptes; in iommu_arena_find_pages()
119 nent = arena->size >> PAGE_SHIFT; in iommu_arena_find_pages()
120 p = ALIGN(arena->next_entry, mask + 1); in iommu_arena_find_pages()
121 i = 0; in iommu_arena_find_pages()
124 while (i < n && p+i < nent) { in iommu_arena_find_pages()
125 if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) { in iommu_arena_find_pages()
130 if (ptes[p+i]) { in iommu_arena_find_pages()
131 p = ALIGN(p + i + 1, mask + 1); in iommu_arena_find_pages()
132 i = 0; in iommu_arena_find_pages()
134 i = i + 1; in iommu_arena_find_pages()
138 if (i < n) { in iommu_arena_find_pages()
141 * Reached the end. Flush the TLB and restart in iommu_arena_find_pages()
144 alpha_mv.mv_pci_tbi(arena->hose, 0, -1); in iommu_arena_find_pages()
148 i = 0; in iommu_arena_find_pages()
151 return -1; in iommu_arena_find_pages()
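
iommu_arena_find_pages() is a first-fit scan over that PTE array: start at next_entry rounded up to the requested alignment, reject a starting position whose run of n pages would cross the device's segment boundary, restart the run just past any busy entry, and, on reaching the end of the window, flush the IOMMU TLB (alpha_mv.mv_pci_tbi) once and retry from entry 0 before returning -1. A simplified userspace sketch of that loop over a bare array follows; the boundary test is reduced to the power-of-two check iommu_is_span_boundary() performs, and details not visible in the fragments (such as the exact handling of the boundary case) are assumptions.

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* True if pages [base+p, base+p+n) would cross a boundary_size-aligned block
 * (boundary_size in pages, power of two) -- mirrors iommu_is_span_boundary(). */
static int crosses_boundary(long p, long n, unsigned long base,
			    unsigned long boundary_size)
{
	unsigned long off = (base + p) & (boundary_size - 1);

	return off + n > boundary_size;
}

/* First-fit search for n free entries in ptes[0..nent), aligned to (mask + 1)
 * entries (a power of two), starting the scan at next_entry. */
static long find_free_run(unsigned long *ptes, long nent, unsigned long base,
			  long next_entry, long n, long mask,
			  unsigned long boundary_size)
{
	long p = ALIGN_UP(next_entry, mask + 1);
	long i = 0;
	int pass = 0;

again:
	while (i < n && p + i < nent) {
		if (!i && crosses_boundary(p, n, base, boundary_size)) {
			/* Whole run would straddle a boundary: slide the start. */
			p = ALIGN_UP(p + 1, mask + 1);
			goto again;
		}
		if (ptes[p + i]) {
			/* Busy entry: restart the run just past it. */
			p = ALIGN_UP(p + i + 1, mask + 1);
			i = 0;
		} else {
			i++;
		}
	}

	if (i < n) {
		if (pass++ < 1) {
			/* Hit the end of the window: the kernel flushes the
			 * whole IOMMU TLB here and retries once from entry 0. */
			p = 0;
			i = 0;
			goto again;
		}
		return -1;
	}
	return p;
}

int main(void)
{
	unsigned long ptes[16] = { 0 };

	ptes[2] = 1;    /* pretend entry 2 is already in use */
	printf("run of 4 starts at entry %ld\n",
	       find_free_run(ptes, 16, 0, 0, 4, 0, 16));
	return 0;
}
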
165 long i, p, mask; in iommu_arena_alloc() local
167 spin_lock_irqsave(&arena->lock, flags); in iommu_arena_alloc()
170 ptes = arena->ptes; in iommu_arena_alloc()
171 mask = max(align, arena->align_entry) - 1; in iommu_arena_alloc()
174 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
175 return -1; in iommu_arena_alloc()
179 for the iommu tlb that could load them from under us. in iommu_arena_alloc()
182 for (i = 0; i < n; ++i) in iommu_arena_alloc()
183 ptes[p+i] = IOMMU_INVALID_PTE; in iommu_arena_alloc()
185 arena->next_entry = p + n; in iommu_arena_alloc()
186 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
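
iommu_arena_alloc() wraps that search in the arena lock and, before dropping the lock, writes IOMMU_INVALID_PTE into every entry of the run: non-zero, so another allocation cannot take it, but not valid, so the IOMMU TLB cannot load a stale translation while the caller is still filling in the real PTEs. A sketch of that reserve-then-fill pattern, reusing find_free_run() from the previous sketch (paste the two together to build) and a pthread mutex in place of the arena spinlock; the sentinel value is a stand-in, not the kernel's IOMMU_INVALID_PTE.

#include <pthread.h>

#define INVALID_PTE 0x2UL    /* stand-in: non-zero but without the valid bit */

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;

static long arena_alloc(unsigned long *ptes, long nent, unsigned long base,
			long *next_entry, long n, long mask,
			unsigned long boundary_size)
{
	long i, p;

	pthread_mutex_lock(&arena_lock);
	p = find_free_run(ptes, nent, base, *next_entry, n, mask, boundary_size);
	if (p < 0) {
		pthread_mutex_unlock(&arena_lock);
		return -1;
	}
	/* Reserve before unlocking: non-zero means "taken", and the missing
	 * valid bit means the IOMMU TLB cannot load the entry prematurely.
	 * The caller overwrites these with real PTEs afterwards. */
	for (i = 0; i < n; ++i)
		ptes[p + i] = INVALID_PTE;
	*next_entry = p + n;
	pthread_mutex_unlock(&arena_lock);
	return p;
}
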
195 long i; in iommu_arena_free() local
197 p = arena->ptes + ofs; in iommu_arena_free()
198 for (i = 0; i < n; ++i) in iommu_arena_free()
199 p[i] = 0; in iommu_arena_free()
216 if ((dac_offset & dev->dma_mask) != dac_offset) in pci_dac_dma_supported()
226 /* Map a single buffer of the indicated size for PCI DMA in streaming
227 mode. The 32-bit PCI bus mastering address to use is returned.
232 pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, in pci_map_single_1() argument
235 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; in pci_map_single_1()
236 dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; in pci_map_single_1()
238 long npages, dma_ofs, i; in pci_map_single_1() local
242 struct device *dev = pdev ? &pdev->dev : NULL; in pci_map_single_1()
248 if (paddr + size + __direct_map_base - 1 <= max_dma in pci_map_single_1()
249 && paddr + size <= __direct_map_size) { in pci_map_single_1()
252 DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n", in pci_map_single_1()
253 cpu_addr, size, ret, __builtin_return_address(0)); in pci_map_single_1()
263 DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n", in pci_map_single_1()
264 cpu_addr, size, ret, __builtin_return_address(0)); in pci_map_single_1()
277 arena = hose->sg_pci; in pci_map_single_1()
278 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in pci_map_single_1()
279 arena = hose->sg_isa; in pci_map_single_1()
281 npages = iommu_num_pages(paddr, size, PAGE_SIZE); in pci_map_single_1()
294 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) in pci_map_single_1()
295 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); in pci_map_single_1()
297 ret = arena->dma_base + dma_ofs * PAGE_SIZE; in pci_map_single_1()
300 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n", in pci_map_single_1()
301 cpu_addr, size, npages, ret, __builtin_return_address(0)); in pci_map_single_1()
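
pci_map_single_1() tries three strategies in order: use the direct-map window if the buffer fits both under the device's DMA mask and inside __direct_map_size, fall back to 64-bit DAC addressing when the device supports it, and otherwise allocate PTEs from a scatter-gather arena (sg_pci if it fits under the mask, else sg_isa). A condensed userspace sketch of that decision ladder follows; the window constants and the error convention here are illustrative stand-ins.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the per-platform window description (example values only). */
static const uint64_t direct_map_base = 0x40000000ULL;
static const uint64_t direct_map_size = 0x40000000ULL;
static const uint64_t dac_offset      = 0x8000000000ULL;

/* Returns a bus address for [paddr, paddr+size), or 0 if only the SG arena
 * (not modeled here) could map it. */
static uint64_t map_single(uint64_t paddr, size_t size, uint64_t dma_mask,
			   int dac_allowed)
{
	/* 1. Direct-map window: no PTEs needed, if the result fits the mask. */
	if (paddr + size + direct_map_base - 1 <= dma_mask &&
	    paddr + size <= direct_map_size)
		return paddr + direct_map_base;

	/* 2. 64-bit DAC addressing, if the device and chipset allow it. */
	if (dac_allowed)
		return paddr + dac_offset;

	/* 3. Otherwise: allocate npages PTEs from the SG arena and return
	 *    arena->dma_base + dma_ofs * PAGE_SIZE (as in the fragment above). */
	return 0;
}

int main(void)
{
	uint64_t bus = map_single(0x123456ULL, 0x2000, 0xffffffffULL, 0);

	printf("bus address %#llx\n", (unsigned long long)bus);
	return 0;
}
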
306 /* Helper for generic DMA-mapping functions. */
312 /* Assume that non-PCI devices asking for DMA are either ISA or EISA, in alpha_gendev_to_pci()
316 /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA in alpha_gendev_to_pci()
318 if (!dev || !dev->dma_mask || !*dev->dma_mask) in alpha_gendev_to_pci()
323 if (*dev->dma_mask >= isa_bridge->dma_mask) in alpha_gendev_to_pci()
331 unsigned long offset, size_t size, in alpha_pci_map_page() argument
340 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; in alpha_pci_map_page()
342 size, dac_allowed); in alpha_pci_map_page()
346 SIZE must match what was provided for in a previous pci_map_single
352 size_t size, enum dma_data_direction dir, in alpha_pci_unmap_page() argument
357 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_unmap_page()
368 dma_addr, size, __builtin_return_address(0)); in alpha_pci_unmap_page()
375 dma_addr, size, __builtin_return_address(0)); in alpha_pci_unmap_page()
379 arena = hose->sg_pci; in alpha_pci_unmap_page()
380 if (!arena || dma_addr < arena->dma_base) in alpha_pci_unmap_page()
381 arena = hose->sg_isa; in alpha_pci_unmap_page()
383 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_page()
384 if (dma_ofs * PAGE_SIZE >= arena->size) { in alpha_pci_unmap_page()
386 " base %llx size %x\n", in alpha_pci_unmap_page()
387 dma_addr, arena->dma_base, arena->size); in alpha_pci_unmap_page()
392 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in alpha_pci_unmap_page()
394 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_page()
399 may have snuck back into the TLB since the last wrap flush), in alpha_pci_unmap_page()
400 we need to flush the TLB before reallocating the latter. */ in alpha_pci_unmap_page()
401 if (dma_ofs >= arena->next_entry) in alpha_pci_unmap_page()
402 alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); in alpha_pci_unmap_page()
404 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_page()
407 dma_addr, size, npages, __builtin_return_address(0)); in alpha_pci_unmap_page()
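
The unmap path converts the bus address back to an arena offset, zeroes the PTEs, and flushes the IOMMU TLB only when the freed run lies at or beyond next_entry: entries behind the cursor will be flushed anyway before they can be reallocated at the next wrap, while entries ahead of it could be handed out again while a stale translation is still cached. A sketch of that release step over a bare PTE array (the arena lock and the DAC/direct-map early exits are omitted); tlb_invalidate() is a stub standing in for alpha_mv.mv_pci_tbi().

#include <stddef.h>

#define PAGE_SHIFT 13
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stub standing in for alpha_mv.mv_pci_tbi(): invalidate [start, end]. */
static void tlb_invalidate(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}

static void arena_unmap(unsigned long *ptes, unsigned long dma_base,
			unsigned long next_entry,
			unsigned long dma_addr, size_t size)
{
	long dma_ofs = (long)((dma_addr - dma_base) >> PAGE_SHIFT);
	/* Same count as iommu_num_pages(dma_addr, size, PAGE_SIZE). */
	long npages = (long)(((dma_addr & (PAGE_SIZE - 1)) + size + PAGE_SIZE - 1)
			     >> PAGE_SHIFT);
	long i;

	for (i = 0; i < npages; ++i)
		ptes[dma_ofs + i] = 0;           /* mark the run free again */

	/* Entries behind next_entry get flushed at the next wrap anyway;
	 * entries at or past it could be reallocated while a stale
	 * translation is still cached, so flush them now. */
	if (dma_ofs >= (long)next_entry)
		tlb_invalidate(dma_addr, dma_addr + size - 1);
}
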
411 device. Returns non-NULL cpu-view pointer to the buffer if
415 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, in alpha_pci_alloc_coherent() argument
421 long order = get_order(size); in alpha_pci_alloc_coherent()
435 memset(cpu_addr, 0, size); in alpha_pci_alloc_coherent()
437 *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); in alpha_pci_alloc_coherent()
448 DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n", in alpha_pci_alloc_coherent()
449 size, cpu_addr, *dma_addrp, __builtin_return_address(0)); in alpha_pci_alloc_coherent()
455 be values that were returned from pci_alloc_consistent. SIZE must
460 static void alpha_pci_free_coherent(struct device *dev, size_t size, in alpha_pci_free_coherent() argument
465 dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL); in alpha_pci_free_coherent()
466 free_pages((unsigned long)cpu_addr, get_order(size)); in alpha_pci_free_coherent()
469 dma_addr, size, __builtin_return_address(0)); in alpha_pci_free_coherent()
476 -1 : Not leader, physically adjacent to previous.
477 -2 : Not leader, virtually adjacent to previous.
495 leader_length = leader->length; in sg_classify()
503 len = sg->length; in sg_classify()
509 sg->dma_address = -1; in sg_classify()
512 sg->dma_address = -2; in sg_classify()
517 leader->dma_address = leader_flag; in sg_classify()
518 leader->dma_length = leader_length; in sg_classify()
527 leader->dma_address = leader_flag; in sg_classify()
528 leader->dma_length = leader_length; in sg_classify()
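
sg_classify() makes one pass over the scatterlist and tags every entry relative to a running "leader": entries physically contiguous with the run get dma_address -1, entries that are only virtually mergeable get -2 (allowed only when virt_ok, i.e. when an SG arena can remap them), and anything else starts a new leader whose dma_length accumulates the coalesced total. The model below restates that over plain (physical address, length) pairs; the exact merge test and the dma_get_max_seg_size() cap are not visible in these fragments, so the page-alignment check used here is an assumption.

#define PAGE_SHIFT 13
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Stand-in for struct scatterlist: just the fields classification touches. */
struct sg_model {
	unsigned long paddr;        /* physical address of the chunk */
	unsigned long length;
	long dma_address;           /* 0/1: leader; -1: phys-adjacent; -2: virt-adjacent */
	unsigned long dma_length;   /* leaders only: total coalesced length */
};

static void classify(struct sg_model *sg, struct sg_model *end, int virt_ok)
{
	struct sg_model *leader = sg;
	long leader_flag = 0;                    /* becomes 1 once a -2 merge happens */
	unsigned long leader_length = leader->length;
	unsigned long next_paddr = leader->paddr + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr = sg->paddr, len = sg->length;

		if (next_paddr == addr) {
			/* Physically contiguous with the run so far. */
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			/* Both ends page aligned: an SG arena can still remap
			 * them into one contiguous bus range. */
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			/* Gap: close out the old leader, start a new one. */
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}
		next_paddr = addr + len;
	}
	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
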
540 long size = leader->dma_length; in sg_fill() local
543 long npages, dma_ofs, i; in sg_fill() local
547 fall into the direct-map window, use it. */ in sg_fill()
548 if (leader->dma_address == 0 in sg_fill()
549 && paddr + size + __direct_map_base - 1 <= max_dma in sg_fill()
550 && paddr + size <= __direct_map_size) { in sg_fill()
551 out->dma_address = paddr + __direct_map_base; in sg_fill()
552 out->dma_length = size; in sg_fill()
554 DBGA(" sg_fill: [%p,%lx] -> direct %llx\n", in sg_fill()
555 __va(paddr), size, out->dma_address); in sg_fill()
562 if (leader->dma_address == 0 && dac_allowed) { in sg_fill()
563 out->dma_address = paddr + alpha_mv.pci_dac_offset; in sg_fill()
564 out->dma_length = size; in sg_fill()
566 DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n", in sg_fill()
567 __va(paddr), size, out->dma_address); in sg_fill()
576 npages = iommu_num_pages(paddr, size, PAGE_SIZE); in sg_fill()
580 if (leader->dma_address == 0) in sg_fill()
581 return -1; in sg_fill()
589 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; in sg_fill()
590 out->dma_length = size; in sg_fill()
592 DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", in sg_fill()
593 __va(paddr), size, out->dma_address, npages); in sg_fill()
597 ptes = &arena->ptes[dma_ofs]; in sg_fill()
604 size = sg->length; in sg_fill()
607 while (sg+1 < end && (int) sg[1].dma_address == -1) { in sg_fill()
608 size += sg[1].length; in sg_fill()
612 npages = iommu_num_pages(paddr, size, PAGE_SIZE); in sg_fill()
615 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) in sg_fill()
620 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), in sg_fill()
621 last_sg->length, npages); in sg_fill()
624 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), in sg_fill()
625 last_sg->length); in sg_fill()
628 } while (++sg < end && (int) sg->dma_address < 0); in sg_fill()
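
Inside sg_fill(), followers tagged -1 are folded into the current chunk before PTEs are written, so one physically contiguous span consumes one contiguous run of entries, and the outer do/while keeps going while the next entry still belongs to the same segment (dma_address < 0). The sketch below reuses the sg_model and constants from the previous sketch and only counts the pages such a coalesced segment needs; the kernel writes arena PTEs in the same per-span loop.

/* Count the IOMMU pages one coalesced segment needs, folding runs of
 * physically adjacent entries (-1) into single (paddr, size) spans, and
 * stopping when the next entry no longer belongs to this segment. */
static long coalesced_pages(struct sg_model *leader, struct sg_model *end)
{
	struct sg_model *sg = leader;
	long npages = 0;

	do {
		unsigned long paddr = sg->paddr;
		unsigned long size = sg->length;

		while (sg + 1 < end && sg[1].dma_address == -1) {
			size += sg[1].length;
			++sg;
		}
		/* Pages spanned by this span, as iommu_num_pages() counts them. */
		npages += ((paddr & ~PAGE_MASK) + size + (1UL << PAGE_SHIFT) - 1)
			  >> PAGE_SHIFT;
		++sg;
	} while (sg < end && sg->dma_address < 0);

	return npages;
}
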
646 dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; in alpha_pci_map_sg()
650 sg->dma_length = sg->length; in alpha_pci_map_sg()
651 sg->dma_address in alpha_pci_map_sg()
653 sg->length, dac_allowed); in alpha_pci_map_sg()
654 if (sg->dma_address == DMA_MAPPING_ERROR) in alpha_pci_map_sg()
655 return -EIO; in alpha_pci_map_sg()
667 hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_map_sg()
668 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; in alpha_pci_map_sg()
669 arena = hose->sg_pci; in alpha_pci_map_sg()
670 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_map_sg()
671 arena = hose->sg_isa; in alpha_pci_map_sg()
673 max_dma = -1; in alpha_pci_map_sg()
681 if ((int) sg->dma_address < 0) in alpha_pci_map_sg()
690 out->dma_length = 0; in alpha_pci_map_sg()
692 if (out - start == 0) { in alpha_pci_map_sg()
694 return -ENOMEM; in alpha_pci_map_sg()
696 DBGA("pci_map_sg: %ld entries\n", out - start); in alpha_pci_map_sg()
698 return out - start; in alpha_pci_map_sg()
707 dma_unmap_sg(&pdev->dev, start, out - start, dir); in alpha_pci_map_sg()
708 return -ENOMEM; in alpha_pci_map_sg()
732 hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_unmap_sg()
733 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; in alpha_pci_unmap_sg()
734 arena = hose->sg_pci; in alpha_pci_unmap_sg()
735 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_unmap_sg()
736 arena = hose->sg_isa; in alpha_pci_unmap_sg()
738 fbeg = -1, fend = 0; in alpha_pci_unmap_sg()
740 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_sg()
744 size_t size; in alpha_pci_unmap_sg() local
748 addr = sg->dma_address; in alpha_pci_unmap_sg()
749 size = sg->dma_length; in alpha_pci_unmap_sg()
750 if (!size) in alpha_pci_unmap_sg()
754 /* It's a DAC address -- nothing to do. */ in alpha_pci_unmap_sg()
756 sg - end + nents, addr, size); in alpha_pci_unmap_sg()
764 sg - end + nents, addr, size); in alpha_pci_unmap_sg()
769 sg - end + nents, addr, size); in alpha_pci_unmap_sg()
771 npages = iommu_num_pages(addr, size, PAGE_SIZE); in alpha_pci_unmap_sg()
772 ofs = (addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_sg()
775 tend = addr + size - 1; in alpha_pci_unmap_sg()
781 may have snuck back into the TLB since the last wrap flush), in alpha_pci_unmap_sg()
782 we need to flush the TLB before reallocating the latter. */ in alpha_pci_unmap_sg()
783 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) in alpha_pci_unmap_sg()
786 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_sg()
788 DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg)); in alpha_pci_unmap_sg()
804 && (__direct_map_base + __direct_map_size - 1 <= mask || in alpha_pci_supported()
805 __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) in alpha_pci_supported()
808 /* Check that we have a scatter-gather arena that fits. */ in alpha_pci_supported()
809 hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_supported()
810 arena = hose->sg_isa; in alpha_pci_supported()
811 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
813 arena = hose->sg_pci; in alpha_pci_supported()
814 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
818 if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) in alpha_pci_supported()
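
alpha_pci_supported() decides whether a DMA mask is usable: yes if the direct-map window (or at least all of low memory through it) ends under the mask, yes if either scatter-gather arena ends under the mask, and, when there is no direct window at all, yes if the KSEG identity range does. A compact restatement with the various limits passed in explicitly; the parameter list is a stand-in for the kernel's per-hose globals, not its signature.

#include <stdint.h>

/* All limits are the bus address of the last usable byte (base + size - 1);
 * pass 0 for windows or arenas that do not exist on this platform. */
static int dma_mask_supported(uint64_t mask,
			      uint64_t direct_map_last,   /* end of direct window */
			      uint64_t low_mem_last,      /* low memory through that window */
			      uint64_t sg_isa_last,       /* end of the ISA SG arena */
			      uint64_t sg_pci_last,       /* end of the PCI SG arena */
			      uint64_t kseg_last)         /* identity map, if no window */
{
	/* 1. Everything reachable through the direct-map window? */
	if (direct_map_last && (direct_map_last <= mask || low_mem_last <= mask))
		return 1;
	/* 2. A scatter-gather arena that fits entirely under the mask? */
	if (sg_isa_last && sg_isa_last <= mask)
		return 1;
	if (sg_pci_last && sg_pci_last <= mask)
		return 1;
	/* 3. No direct window at all: fall back to the identity map. */
	if (!direct_map_last && kseg_last <= mask)
		return 1;
	return 0;
}
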
833 long i, p; in iommu_reserve() local
835 if (!arena) return -EINVAL; in iommu_reserve()
837 spin_lock_irqsave(&arena->lock, flags); in iommu_reserve()
840 ptes = arena->ptes; in iommu_reserve()
843 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
844 return -1; in iommu_reserve()
848 for the iommu tlb that could load them from under us. in iommu_reserve()
850 for (i = 0; i < pg_count; ++i) in iommu_reserve()
851 ptes[p+i] = IOMMU_RESERVED_PTE; in iommu_reserve()
853 arena->next_entry = p + pg_count; in iommu_reserve()
854 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
863 long i; in iommu_release() local
865 if (!arena) return -EINVAL; in iommu_release()
867 ptes = arena->ptes; in iommu_release()
870 for(i = pg_start; i < pg_start + pg_count; i++) in iommu_release()
871 if (ptes[i] != IOMMU_RESERVED_PTE) in iommu_release()
872 return -EBUSY; in iommu_release()
884 long i, j; in iommu_bind() local
886 if (!arena) return -EINVAL; in iommu_bind()
888 spin_lock_irqsave(&arena->lock, flags); in iommu_bind()
890 ptes = arena->ptes; in iommu_bind()
894 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
895 return -EBUSY; in iommu_bind()
899 for(i = 0, j = pg_start; i < pg_count; i++, j++) in iommu_bind()
900 ptes[j] = mk_iommu_pte(page_to_phys(pages[i])); in iommu_bind()
902 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
911 long i; in iommu_unbind() local
913 if (!arena) return -EINVAL; in iommu_unbind()
915 p = arena->ptes + pg_start; in iommu_unbind()
916 for(i = 0; i < pg_count; i++) in iommu_unbind()
917 p[i] = IOMMU_RESERVED_PTE; in iommu_unbind()
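
The last four fragments are a small GART-style interface used by the Alpha AGP code: iommu_reserve() claims a run of entries and marks them IOMMU_RESERVED_PTE, iommu_bind() installs real translations for an array of pages, iommu_unbind() puts the reserved marker back, and iommu_release() frees a range only if nothing in it is still bound. A userspace model of that state machine; the sentinel value is a stand-in, locking and the arena search are elided, and the exact precondition iommu_bind() checks is not visible in these fragments, so requiring the range to be reserved first is a modeling choice.

#define PAGE_SHIFT   13
#define RESERVED_PTE 0x8000000000000000UL   /* stand-in sentinel value */

/* Bind: install translations; in this model the range must be reserved. */
static int bind_pages(unsigned long *ptes, long pg_start, long pg_count,
		      const unsigned long *page_paddrs)
{
	long i, j;

	for (j = pg_start; j < pg_start + pg_count; j++)
		if (ptes[j] != RESERVED_PTE)
			return -1;                       /* -EBUSY in the kernel */

	for (i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = (page_paddrs[i] >> (PAGE_SHIFT - 1)) | 1;
	return 0;
}

/* Unbind: drop the translations but keep the range reserved. */
static void unbind_pages(unsigned long *ptes, long pg_start, long pg_count)
{
	long i;

	for (i = 0; i < pg_count; i++)
		ptes[pg_start + i] = RESERVED_PTE;
}

/* Release: refuse while anything in the range is still bound. */
static int release_pages(unsigned long *ptes, long pg_start, long pg_count)
{
	long i;

	for (i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != RESERVED_PTE)
			return -1;                       /* -EBUSY */
	for (i = pg_start; i < pg_start + pg_count; i++)
		ptes[i] = 0;
	return 0;
}
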