/* Matching lines from the Alpha PCI IOMMU code (arch/alpha/kernel/pci_iommu.c)
   for the search terms "dac", "mode", and "mask", grouped by function. */

// SPDX-License-Identifier: GPL-2.0

#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>
/* in mk_iommu_pte() */
        return (paddr >> (PAGE_SHIFT-1)) | 1;
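/*
 * Illustrative sketch (ours, not part of the file above): what the PTE
 * encoding does.  Assuming Alpha's 8 KB pages (PAGE_SHIFT == 13), the
 * page frame number lands one bit up from the bottom and bit 0 becomes
 * the "valid" flag.
 */
#include <assert.h>

#define SKETCH_PAGE_SHIFT 13                    /* assumption: 8 KB pages */

static unsigned long sketch_mk_pte(unsigned long paddr)
{
        return (paddr >> (SKETCH_PAGE_SHIFT - 1)) | 1;
}

int main(void)
{
        unsigned long paddr = 0x12340000UL;     /* page-aligned address */
        unsigned long pte = sketch_mk_pte(paddr);

        assert(pte & 1);                        /* valid bit is set */
        assert(pte >> 1 == paddr >> SKETCH_PAGE_SHIFT); /* PFN in bits 1.. */
        return 0;
}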
/* in iommu_arena_new_node() */

        /* Note that the TLB lookup logic uses bitwise concatenation,
           not addition, so the required arena alignment is based on
           the size of the window.  Retain the align parameter so that
           particular systems can over-align the arena. */

        arena->ptes = memblock_alloc(mem_size, align);
        if (!arena->ptes)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, mem_size, align);

        spin_lock_init(&arena->lock);
        arena->hose = hose;
        arena->dma_base = base;
        arena->size = window_size;
        arena->next_entry = 0;

        /* Align allocations to a multiple of a page size.  Not needed
           unless there are chip bugs. */
        arena->align_entry = 1;
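/*
 * Illustrative arithmetic (ours): how big the PTE table gets and why its
 * alignment matters.  Assuming 8 KB pages and 8-byte PTEs, a 1 GB window
 * needs a 1 MB table, and per the comment above the table is aligned to
 * its own size because the TLB concatenates the table base with the
 * offset rather than adding them.
 */
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 8192;         /* assumption: 8 KB pages */
        unsigned long window_size = 1UL << 30;  /* assumption: 1 GB window */
        unsigned long mem_size = (window_size / page_size) * 8;

        printf("pte table: %lu bytes\n", mem_size);     /* 1048576 */
        return 0;
}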
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
                       long n, long mask)

        base = arena->dma_base >> PAGE_SHIFT;

        /* Search forward for the first mask-aligned sequence of N free ptes */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
        p = ALIGN(arena->next_entry, mask + 1);

                        /* Hit a segment boundary: skip ahead, re-aligned. */
                        p = ALIGN(p + 1, mask + 1);

                        /* Hit an in-use pte: restart just past it. */
                        p = ALIGN(p + i + 1, mask + 1);

                        /* Reached the end: flush the TLB and retry once
                           from the start of the arena. */
                        alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

                        /* Second pass also failed. */
                        return -1;
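/*
 * Illustrative sketch (standalone model, not the kernel code): the
 * first-fit search above looks for N consecutive free PTEs whose start
 * index is aligned to (mask + 1).  A userspace model over a plain array:
 */
#include <stdio.h>

#define SKETCH_ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

static long sketch_find_pages(const unsigned long *ptes, long nent,
                              long start, long n, long mask)
{
        long p = SKETCH_ALIGN(start, mask + 1);
        long i = 0;

        while (i < n && p + i < nent) {
                if (ptes[p + i]) {
                        /* Occupied: restart just past it, re-aligned. */
                        p = SKETCH_ALIGN(p + i + 1, mask + 1);
                        i = 0;
                } else {
                        i++;
                }
        }
        return i < n ? -1 : p;
}

int main(void)
{
        unsigned long ptes[16] = { [0] = 1, [1] = 1, [5] = 1 };

        /* Want 4 free entries aligned to 4: indices 8..11 qualify. */
        printf("%ld\n", sketch_find_pages(ptes, 16, 0, 4, 3));  /* 8 */
        return 0;
}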
/* in iommu_arena_alloc() */
        long i, p, mask;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes */
        ptes = arena->ptes;
        mask = max(align, arena->align_entry) - 1;
        p = iommu_arena_find_pages(dev, arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Mark the found ptes in use before dropping the lock, so the
           iommu tlb cannot load them from under us. */
        for (i = 0; i < n; ++i)
                ptes[p+i] = IOMMU_INVALID_PTE;

        arena->next_entry = p + n;
        spin_unlock_irqrestore(&arena->lock, flags);
/* in iommu_arena_free() */
        p = arena->ptes + ofs;
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)

        /* If this is not set, the machine doesn't support DAC at all. */
        if (dac_offset == 0)
                ok = 0;

        /* The device has to be able to address our DAC bit. */
        if ((dac_offset & dev->dma_mask) != dac_offset)
                ok = 0;
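/*
 * Illustrative sketch (ours): the DAC test above passes only when the
 * device's DMA mask covers every bit of the machine's DAC offset.  With
 * a hypothetical offset at bit 40:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dac_offset = 1ULL << 40;       /* assumption */
        uint64_t mask32 = 0xffffffffULL;        /* 32-bit-only device */
        uint64_t mask64 = ~0ULL;                /* 64-bit device */

        printf("32-bit dev ok: %d\n", (dac_offset & mask32) == dac_offset); /* 0 */
        printf("64-bit dev ok: %d\n", (dac_offset & mask64) == dac_offset); /* 1 */
        return 0;
}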
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned. */

/* in pci_map_single_1() */
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;

        struct device *dev = pdev ? &pdev->dev : NULL;

        /* First check to see if we can use the direct map window. */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {

                DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",

        /* Next, use DAC if selected earlier. */

                DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",

        /* Otherwise, fall back to a scatter-gather arena that fits
           under the device's dma mask. */
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;

        DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
/* Helper for generic DMA-mapping functions. */

/* in alpha_gendev_to_pci() */
        /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
           BUG() otherwise. */

        /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
           bridge is bus master then). */
        if (!dev || !dev->dma_mask || !*dev->dma_mask)

        if (*dev->dma_mask >= isa_bridge->dma_mask)
/* in alpha_pci_map_page() */
        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined. */
/* in alpha_pci_unmap_page() */
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;

        DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",

        arena = hose->sg_pci;
        if (!arena || dma_addr < arena->dma_base)
                arena = hose->sg_isa;

        dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
        if (dma_ofs * PAGE_SIZE >= arena->size) {
                       /* error printk arguments: */
                       dma_addr, arena->dma_base, arena->size);

        spin_lock_irqsave(&arena->lock, flags);

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter. */
        if (dma_ofs >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

        spin_unlock_irqrestore(&arena->lock, flags);
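/*
 * Illustrative note (ours): why the unmap path only flushes the TLB for
 * ptes at or above next_entry.  Allocation moves next_entry forward, so
 * entries below it are not handed out again until the allocator wraps,
 * and the wrap itself performs a full flush (see iommu_arena_find_pages
 * above).  A freed window at or above next_entry, by contrast, could be
 * reallocated immediately, so its stale translations must go now.
 */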
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined. */

/* in alpha_pci_alloc_coherent() */
        /* The address doesn't fit required mask and we
           do not have iommu.  Try again with GFP_DMA. */

        DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",

/* in alpha_pci_free_coherent() */
        dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
        0   : Followers all physically adjacent.
        1   : Followers all virtually adjacent.
        -1  : Not leader, physically adjacent to previous.
        -2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers. */

/* in sg_classify() */
        leader_length = leader->length;

        len = sg->length;

        /* Follower markers written inside the classification loop: */
        sg->dma_address = -1;

        sg->dma_address = -2;

        /* New leader found: commit the previous leader's totals. */
        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;

        /* ...and commit the final leader after the loop. */
        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;
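/*
 * Illustrative sketch (standalone model, names are ours): how the
 * classification above merges physically adjacent entries.  Entry 1
 * starts exactly where entry 0 ends, so it becomes a -1 follower and
 * the leader's dma_length grows to cover both.  (The kernel code also
 * handles virtual adjacency, marked -2; this model does not.)
 */
#include <stdio.h>

struct sketch_sg { unsigned long paddr, length; long dma_address, dma_length; };

static void sketch_classify(struct sketch_sg *sg, int nents)
{
        struct sketch_sg *leader = sg;
        unsigned long next = sg->paddr + sg->length;
        long leader_length = sg->length;

        for (int i = 1; i < nents; i++) {
                if (sg[i].paddr == next) {
                        sg[i].dma_address = -1;         /* merge into leader */
                        leader_length += sg[i].length;
                } else {
                        leader->dma_length = leader_length;
                        leader = &sg[i];
                        leader_length = sg[i].length;
                }
                next = sg[i].paddr + sg[i].length;
        }
        leader->dma_length = leader_length;
}

int main(void)
{
        struct sketch_sg sg[3] = {
                { 0x10000, 0x2000, 0, 0 },      /* leader */
                { 0x12000, 0x2000, 0, 0 },      /* physically adjacent */
                { 0x40000, 0x1000, 0, 0 },      /* new leader */
        };

        sketch_classify(sg, 3);
        printf("leader0 merged length: 0x%lx\n",
               (unsigned long)sg[0].dma_length);        /* 0x4000 */
        printf("entry1 marker: %ld\n", sg[1].dma_address);      /* -1 */
        return 0;
}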
/* in sg_fill() */
        long size = leader->dma_length;

        /* If everything is physically contiguous, and the addresses
           fall into the direct-map window, use it. */
        if (leader->dma_address == 0
            && paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                out->dma_address = paddr + __direct_map_base;
                out->dma_length = size;

                DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
                     __va(paddr), size, out->dma_address);

        /* If physically contiguous and DAC is available, use it. */
        if (leader->dma_address == 0 && dac_allowed) {
                out->dma_address = paddr + alpha_mv.pci_dac_offset;
                out->dma_length = size;

                DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
                     __va(paddr), size, out->dma_address);

        /* Arena allocation failed; if a direct map was attempted
           above and did not fit, fail outright. */
        if (leader->dma_address == 0)
                return -1;

        out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
        out->dma_length = size;

        DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
             __va(paddr), size, out->dma_address, npages);

        ptes = &arena->ptes[dma_ofs];

        size = sg->length;

        /* Absorb physically adjacent followers into this chunk. */
        while (sg+1 < end && (int) sg[1].dma_address == -1) {

        /* per-entry debug output for the merged followers: */
                last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                last_sg->length, npages);

                last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                last_sg->length);

        } while (++sg < end && (int) sg->dma_address < 0);
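/*
 * Illustrative arithmetic (ours): how many IOMMU pages a buffer spans.
 * A buffer need not start page-aligned, so the page count depends on
 * the offset within the first page, as in the npages computations above.
 */
#include <stdio.h>

#define SKETCH_PAGE_SIZE 8192UL         /* assumption: 8 KB pages */

static unsigned long sketch_num_pages(unsigned long paddr, unsigned long size)
{
        unsigned long ofs = paddr & (SKETCH_PAGE_SIZE - 1);

        return (ofs + size + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
}

int main(void)
{
        /* 8 KB starting 4 KB into a page straddles two pages. */
        printf("%lu\n", sketch_num_pages(0x11000, 0x2000));     /* 2 */
        return 0;
}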
/* in alpha_pci_map_sg() */
        dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

        /* Fast path single entry scatterlists. */
        sg->dma_length = sg->length;
        sg->dma_address
          = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
                             sg->length, dac_allowed);
        if (sg->dma_address == DMA_MAPPING_ERROR)
                return -EIO;

        /* Second, figure out where we're going to map things. */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        /* No scatter-gather arena at all (no mv_pci_tbi hook): */
        max_dma = -1;

        /* Skip follower entries (negative dma_address markers). */
        if ((int) sg->dma_address < 0)

        /* Mark the end of the list for unmap_sg. */
        out->dma_length = 0;

        if (out - start == 0) {
                return -ENOMEM;

        DBGA("pci_map_sg: %ld entries\n", out - start);

        return out - start;

        /* Error path: undo whatever was already mapped. */
        dma_unmap_sg(&pdev->dev, start, out - start, dir);
        return -ENOMEM;
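/*
 * Illustrative sketch (ours): the "out->dma_length = 0" end marker above
 * lets the unmap path walk the mapped list without knowing in advance
 * how many entries mapping produced.  A standalone model:
 */
#include <stdio.h>

struct sketch_ent { unsigned long dma_address, dma_length; };

int main(void)
{
        /* Three input entries merged into two mappings plus a marker. */
        struct sketch_ent mapped[3] = {
                { 0x40000000, 0x4000 },
                { 0x40010000, 0x1000 },
                { 0, 0 },               /* end-of-list marker */
        };
        int n = 0;

        while (mapped[n].dma_length != 0)
                n++;
        printf("mapped entries: %d\n", n);      /* 2 */
        return 0;
}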
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above. */

/* in alpha_pci_unmap_sg() */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        fbeg = -1, fend = 0;

        spin_lock_irqsave(&arena->lock, flags);

        addr = sg->dma_address;
        size = sg->dma_length;

        /* It's a DAC address -- nothing to do. */
        DBGA(" (%ld) DAC [%llx,%zx]\n",
             sg - end + nents, addr, size);

        /* The direct-map and sg cases log the same arguments: */
             sg - end + nents, addr, size);

             sg - end + nents, addr, size);

        ofs = (addr - arena->dma_base) >> PAGE_SHIFT;

        tend = addr + size - 1;

        /* Flush the TLB if the freed range could be reallocated before
           the next wraparound flush. */
        if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, fbeg, fend);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
/* Return whether the given PCI device DMA address mask can be
   supported properly. */

static int alpha_pci_supported(struct device *dev, u64 mask)

        /* If there exists a direct map, and the mask fits either
           the entire direct mapped space or the total system memory as
           shifted by the map base. */
        if (__direct_map_size != 0
            && (__direct_map_base + __direct_map_size - 1 <= mask ||
                __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
                return 1;

        /* Check that we have a scatter-gather arena that fits. */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        arena = hose->sg_isa;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;
        arena = hose->sg_pci;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;

        /* As last resort try ZONE_DMA. */
        if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
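/*
 * Illustrative arithmetic (ours): every check above has the same shape,
 * "does window_base + window_size - 1 fit under the device's mask?".
 */
#include <stdint.h>
#include <stdio.h>

static int sketch_fits(uint64_t base, uint64_t size, uint64_t mask)
{
        return base + size - 1 <= mask;
}

int main(void)
{
        uint64_t mask32 = 0xffffffffULL;

        /* A 1 GB direct-map window at 1 GB fits a 32-bit mask... */
        printf("%d\n", sketch_fits(1ULL << 30, 1ULL << 30, mask32));    /* 1 */
        /* ...but the same window based at 4 GB does not. */
        printf("%d\n", sketch_fits(1ULL << 32, 1ULL << 30, mask32));    /* 0 */
        return 0;
}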
/* in iommu_reserve() */
        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes */
        ptes = arena->ptes;

        spin_unlock_irqrestore(&arena->lock, flags);
        return -1;

        arena->next_entry = p + pg_count;
        spin_unlock_irqrestore(&arena->lock, flags);

/* in iommu_release() */
        if (!arena) return -EINVAL;

        ptes = arena->ptes;

        return -EBUSY;

/* in iommu_bind() */
        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        ptes = arena->ptes;

        spin_unlock_irqrestore(&arena->lock, flags);
        return -EBUSY;

        spin_unlock_irqrestore(&arena->lock, flags);

/* in iommu_unbind() */
        if (!arena) return -EINVAL;

        p = arena->ptes + pg_start;