Lines matching full:iommu
19 #include <asm/iommu.h>
39 unsigned long prot; /* IOMMU page protections */
76 printk("iommu_batch_flush: IOMMU map of " in iommu_batch_flush()
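
All of these matches come from the sun4v PCI IOMMU layer (the pci_sun4v_* names point at arch/sparc/kernel/pci_sun4v.c in older kernels). The `prot` field at line 39 and the iommu_batch_flush() printk at line 76 belong to the per-CPU batcher that queues page mappings and pushes them to the hypervisor in bulk. A condensed sketch of that flush loop, assuming the pci_sun4v_iommu_map() wrapper and HV_PCI_TSBID macro used elsewhere in the file; the struct layout and locals are reconstructed, not quoted:

    /* Per-CPU batch of pending IOTSB entries (reconstruction). */
    struct iommu_batch {
            struct device *dev;     /* device the mapping is for */
            unsigned long prot;     /* IOMMU page protections */
            unsigned long entry;    /* starting index into the IOTSB */
            u64 *pglist;            /* physical addresses queued to map */
            unsigned long npages;   /* pages currently queued */
    };

    /* Interrupts must be disabled; the batch is per-CPU state. */
    static long iommu_batch_flush(struct iommu_batch *p)
    {
            struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
            unsigned long entry = p->entry;
            u64 *pglist = p->pglist;
            u64 npages = p->npages;

            while (npages != 0) {
                    long num = pci_sun4v_iommu_map(pbm->devhandle,
                                                   HV_PCI_TSBID(0, entry),
                                                   npages, p->prot,
                                                   __pa(pglist));
                    if (unlikely(num < 0)) {
                            if (printk_ratelimit())
                                    printk("iommu_batch_flush: IOMMU map "
                                           "failed with status %ld\n", num);
                            return -1;
                    }
                    /* The hypervisor may map fewer pages than asked for,
                     * so advance and retry with the remainder. */
                    entry += num;
                    npages -= num;
                    pglist += num;
            }

            p->entry = entry;
            p->npages = 0;
            return 0;
    }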
134 struct iommu *iommu; in dma_4v_alloc_coherent() local
155 iommu = dev->archdata.iommu; in dma_4v_alloc_coherent()
157 spin_lock_irqsave(&iommu->lock, flags); in dma_4v_alloc_coherent()
158 entry = iommu_range_alloc(dev, iommu, npages, NULL); in dma_4v_alloc_coherent()
159 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_alloc_coherent()
164 *dma_addrp = (iommu->page_table_map_base + in dma_4v_alloc_coherent()
191 spin_lock(&iommu->lock); in dma_4v_alloc_coherent()
192 iommu_range_free(iommu, *dma_addrp, npages); in dma_4v_alloc_coherent()
193 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_alloc_coherent()
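
Lines 134-193 trace dma_4v_alloc_coherent(): carve a range from the arena under iommu->lock, turn the entry index into a DMA address, then program the IOTSB through the batcher. A sketch of that flow under the same assumptions (locals and labels reconstructed):

    spin_lock_irqsave(&iommu->lock, flags);
    entry = iommu_range_alloc(dev, iommu, npages, NULL);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if (unlikely(entry == DMA_ERROR_CODE))
            goto range_alloc_fail;

    /* DMA address = arena base + entry index scaled to IO pages. */
    *dma_addrp = (iommu->page_table_map_base +
                  (entry << IO_PAGE_SHIFT));

    local_irq_save(flags);
    iommu_batch_start(dev,
                      HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE,
                      entry);
    for (n = 0; n < npages; n++)
            if (unlikely(iommu_batch_add(first_page + (n * PAGE_SIZE)) < 0))
                    goto iommu_map_fail;
    if (unlikely(iommu_batch_end() < 0))
            goto iommu_map_fail;
    local_irq_restore(flags);
    return ret;

    iommu_map_fail:
            /* Interrupts are still disabled on this path, which is why
             * line 191 takes plain spin_lock() while line 193 pairs it
             * with spin_unlock_irqrestore() using the flags saved by
             * local_irq_save() above -- deliberate, not a mismatch. */
            spin_lock(&iommu->lock);
            iommu_range_free(iommu, *dma_addrp, npages);
            spin_unlock_irqrestore(&iommu->lock, flags);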
204 struct iommu *iommu; in dma_4v_free_coherent() local
209 iommu = dev->archdata.iommu; in dma_4v_free_coherent()
212 entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); in dma_4v_free_coherent()
214 spin_lock_irqsave(&iommu->lock, flags); in dma_4v_free_coherent()
216 iommu_range_free(iommu, dvma, npages); in dma_4v_free_coherent()
227 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_free_coherent()
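
dma_4v_free_coherent() (lines 204-227) inverts that: recover the entry index from the DMA address, release the arena range, and demap the IOTSB entries via the hypervisor, all under one lock. A sketch, with the demap loop reconstructed:

    /* Inverse of the address computation in dma_4v_alloc_coherent(). */
    entry = ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

    spin_lock_irqsave(&iommu->lock, flags);

    iommu_range_free(iommu, dvma, npages);

    do {
            unsigned long num;

            /* The hypervisor may demap only part of the request. */
            num = pci_sun4v_iommu_demap(devhandle,
                                        HV_PCI_TSBID(0, entry), npages);
            entry += num;
            npages -= num;
    } while (npages != 0);

    spin_unlock_irqrestore(&iommu->lock, flags);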
239 struct iommu *iommu; in dma_4v_map_page() local
246 iommu = dev->archdata.iommu; in dma_4v_map_page()
255 spin_lock_irqsave(&iommu->lock, flags); in dma_4v_map_page()
256 entry = iommu_range_alloc(dev, iommu, npages, NULL); in dma_4v_map_page()
257 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_map_page()
262 bus_addr = (iommu->page_table_map_base + in dma_4v_map_page()
293 spin_lock(&iommu->lock); in dma_4v_map_page()
294 iommu_range_free(iommu, bus_addr, npages); in dma_4v_map_page()
295 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_map_page()
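
dma_4v_map_page() (lines 239-295) follows the same allocate-then-batch shape as the coherent allocator; the differences are that write access depends on the DMA direction and the buffer's sub-page offset is folded back into the returned handle. A sketch of those two details (names reconstructed):

    prot = HV_PCI_MAP_ATTR_READ;
    if (direction != DMA_TO_DEVICE)
            prot |= HV_PCI_MAP_ATTR_WRITE;

    spin_lock_irqsave(&iommu->lock, flags);
    entry = iommu_range_alloc(dev, iommu, npages, NULL);
    spin_unlock_irqrestore(&iommu->lock, flags);
    if (unlikely(entry == DMA_ERROR_CODE))
            goto bad;

    bus_addr = (iommu->page_table_map_base +
                (entry << IO_PAGE_SHIFT));
    /* Keep the original buffer's offset within its IO page. */
    ret = bus_addr | (oaddr & ~IO_PAGE_MASK);

The failure path at lines 293-295 shows the same plain spin_lock() / spin_unlock_irqrestore() pairing as the coherent allocator, again because interrupts are already disabled there.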
305 struct iommu *iommu; in dma_4v_unmap_page() local
316 iommu = dev->archdata.iommu; in dma_4v_unmap_page()
324 spin_lock_irqsave(&iommu->lock, flags); in dma_4v_unmap_page()
326 iommu_range_free(iommu, bus_addr, npages); in dma_4v_unmap_page()
328 entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; in dma_4v_unmap_page()
338 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_unmap_page()
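
dma_4v_unmap_page() (lines 305-338) page-aligns the handle, frees the arena range, and demaps in a loop, this time keeping everything inside one irqsave-locked section. A sketch:

    npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
    npages >>= IO_PAGE_SHIFT;
    bus_addr &= IO_PAGE_MASK;

    spin_lock_irqsave(&iommu->lock, flags);

    iommu_range_free(iommu, bus_addr, npages);

    entry = (bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
    do {
            unsigned long num;

            num = pci_sun4v_iommu_demap(devhandle,
                                        HV_PCI_TSBID(0, entry), npages);
            entry += num;
            npages -= num;
    } while (npages != 0);

    spin_unlock_irqrestore(&iommu->lock, flags);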
351 struct iommu *iommu; in dma_4v_map_sg() local
357 iommu = dev->archdata.iommu; in dma_4v_map_sg()
358 if (nelems == 0 || !iommu) in dma_4v_map_sg()
373 spin_lock_irqsave(&iommu->lock, flags); in dma_4v_map_sg()
380 base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT; in dma_4v_map_sg()
390 /* Allocate iommu entries for that segment */ in dma_4v_map_sg()
393 entry = iommu_range_alloc(dev, iommu, npages, &handle); in dma_4v_map_sg()
398 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" in dma_4v_map_sg()
399 " npages %lx\n", iommu, paddr, npages); in dma_4v_map_sg()
406 dma_addr = iommu->page_table_map_base + in dma_4v_map_sg()
453 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_map_sg()
471 iommu_range_free(iommu, vaddr, npages); in dma_4v_map_sg()
479 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_map_sg()
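
dma_4v_map_sg() (lines 351-479) holds iommu->lock across the whole scatterlist walk and allocates one arena range per (possibly coalesced) segment, threading a handle through so each allocation resumes where the previous one ended. A sketch of the per-segment core; segment coalescing and the batched hypervisor mapping are elided:

    spin_lock_irqsave(&iommu->lock, flags);

    base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
    for_each_sg(sglist, s, nelems, i) {
            /* Allocate iommu entries for this segment; `handle` is a
             * search hint carried across successive allocations. */
            entry = iommu_range_alloc(dev, iommu, npages, &handle);
            if (unlikely(entry == DMA_ERROR_CODE)) {
                    if (printk_ratelimit())
                            printk(KERN_INFO "iommu_alloc failed, iommu %p"
                                   " paddr %lx npages %lx\n",
                                   iommu, paddr, npages);
                    goto iommu_map_failed;
            }

            dma_addr = iommu->page_table_map_base +
                       (entry << IO_PAGE_SHIFT);
            /* ... batch the page mappings and fill in the segment's
             * dma_address/dma_length here ... */
    }

    spin_unlock_irqrestore(&iommu->lock, flags);

The unwind path behind lines 471-479 walks the partially mapped list and returns each range with iommu_range_free() before dropping the lock.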
490 struct iommu *iommu; in dma_4v_unmap_sg() local
496 iommu = dev->archdata.iommu; in dma_4v_unmap_sg()
500 spin_lock_irqsave(&iommu->lock, flags); in dma_4v_unmap_sg()
511 iommu_range_free(iommu, dma_handle, npages); in dma_4v_unmap_sg()
513 entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT); in dma_4v_unmap_sg()
526 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4v_unmap_sg()
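
dma_4v_unmap_sg() (lines 490-526) performs the same free-plus-demap dance per scatterlist entry, under a single lock acquisition. A sketch (the npages computation mirrors dma_4v_unmap_page):

    spin_lock_irqsave(&iommu->lock, flags);

    for_each_sg(sglist, sg, nelems, i) {
            dma_addr_t dma_handle = sg->dma_address;
            unsigned int len = sg->dma_length;
            unsigned long npages, entry;

            if (!len)
                    break;
            npages = IO_PAGE_ALIGN(dma_handle + len) -
                     (dma_handle & IO_PAGE_MASK);
            npages >>= IO_PAGE_SHIFT;

            iommu_range_free(iommu, dma_handle, npages);

            entry = ((dma_handle - iommu->page_table_map_base)
                     >> IO_PAGE_SHIFT);
            while (npages != 0) {
                    unsigned long num;

                    num = pci_sun4v_iommu_demap(devhandle,
                                                HV_PCI_TSBID(0, entry),
                                                npages);
                    entry += num;
                    npages -= num;
            }
    }

    spin_unlock_irqrestore(&iommu->lock, flags);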
553 struct iommu *iommu) in probe_existing_entries() argument
555 struct iommu_arena *arena = &iommu->arena; in probe_existing_entries()
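
probe_existing_entries() (lines 553-555) scans the arena's backing TSB for translations the firmware left installed, so the allocator won't reuse those slots. A sketch of the scan, assuming the pci_sun4v_iommu_getmap() wrapper from the same file; handling of stale entries is omitted:

    for (i = 0; i < arena->limit; i++) {
            unsigned long ret, io_attrs, ra;

            ret = pci_sun4v_iommu_getmap(devhandle, HV_PCI_TSBID(0, i),
                                         &io_attrs, &ra);
            if (ret == HV_EOK) {
                    /* Slot already translated by firmware: mark it
                     * busy so iommu_range_alloc() never hands it out. */
                    cnt++;
                    __set_bit(i, arena->map);
            }
    }
    return cnt;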
583 struct iommu *iommu = pbm->iommu; in pci_sun4v_iommu_init() local
603 /* Setup initial software IOMMU state. */ in pci_sun4v_iommu_init()
604 spin_lock_init(&iommu->lock); in pci_sun4v_iommu_init()
605 iommu->ctx_lowest_free = 1; in pci_sun4v_iommu_init()
606 iommu->page_table_map_base = dma_offset; in pci_sun4v_iommu_init()
607 iommu->dma_addr_mask = dma_mask; in pci_sun4v_iommu_init()
612 iommu->arena.map = kzalloc(sz, GFP_KERNEL); in pci_sun4v_iommu_init()
613 if (!iommu->arena.map) { in pci_sun4v_iommu_init()
617 iommu->arena.limit = num_tsb_entries; in pci_sun4v_iommu_init()
619 sz = probe_existing_entries(pbm, iommu); in pci_sun4v_iommu_init()
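
pci_sun4v_iommu_init() (lines 583-619) seeds the software state: the lock, the DMA window base and mask (derived from the firmware-described virtual DMA range), and a one-bit-per-entry arena bitmap sized from the TSB, after which firmware entries are imported. A sketch around the matched lines (the bitmap rounding is reconstructed):

    /* Setup initial software IOMMU state. */
    spin_lock_init(&iommu->lock);
    iommu->ctx_lowest_free = 1;
    iommu->page_table_map_base = dma_offset;
    iommu->dma_addr_mask = dma_mask;

    /* One bit per TSB entry, rounded up. */
    sz = (num_tsb_entries + 7) / 8;
    sz = (sz + 7UL) & ~7UL;
    iommu->arena.map = kzalloc(sz, GFP_KERNEL);
    if (!iommu->arena.map)
            return -ENOMEM;
    iommu->arena.limit = num_tsb_entries;

    /* Reserve whatever translations the firmware already owns. */
    sz = probe_existing_entries(pbm, iommu);
    if (sz)
            printk("%s: Imported %lu TSB entries from OBP\n",
                   pbm->name, sz);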
927 struct iommu *iommu; in pci_sun4v_probe() local
976 iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL); in pci_sun4v_probe()
977 if (!iommu) { in pci_sun4v_probe()
978 printk(KERN_ERR PFX "Could not allocate pbm iommu\n"); in pci_sun4v_probe()
982 pbm->iommu = iommu; in pci_sun4v_probe()
993 kfree(pbm->iommu); in pci_sun4v_probe()
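
Finally, pci_sun4v_probe() (lines 927-993) allocates the struct iommu alongside the pbm and wires it into pbm->iommu; the unwind at line 993 kfree()s it if later setup fails. A minimal sketch of that ownership pattern (labels and the later-setup call are placeholders, not quoted):

    iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
    if (!iommu) {
            printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
            goto out_err;                   /* hypothetical label */
    }
    pbm->iommu = iommu;

    err = pci_sun4v_pbm_init(pbm, op, devhandle);  /* later setup (assumed name) */
    if (err)
            goto out_free_iommu;

    return 0;

    out_free_iommu:
            kfree(pbm->iommu);              /* matches line 993 */
    out_err:
            /* ... free the pbm itself, etc. ... */
            return err;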