Lines Matching full:iommu

1 /* iommu.c: Generic sparc64 IOMMU support.
14 #include <linux/iommu-helper.h>
21 #include <asm/iommu.h>
46 /* Must be invoked under the IOMMU lock. */
47 static void iommu_flushall(struct iommu *iommu) in iommu_flushall() argument
49 if (iommu->iommu_flushinv) { in iommu_flushall()
50 iommu_write(iommu->iommu_flushinv, ~(u64)0); in iommu_flushall()
55 tag = iommu->iommu_tags; in iommu_flushall()
62 (void) iommu_read(iommu->write_complete_reg); in iommu_flushall()
76 #define IOPTE_IS_DUMMY(iommu, iopte) \ argument
77 ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
79 static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte) in iopte_make_dummy() argument
84 val |= iommu->dummy_page_pa; in iopte_make_dummy()
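The two hits above only show the dummy-page comparison and the OR of dummy_page_pa. A minimal sketch of what iopte_make_dummy() does end to end, assuming the IOPTE_PAGE mask and the iopte_val() accessor from asm/iommu.h, would be:

	static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
	{
		unsigned long val = iopte_val(*iopte);

		/* Drop the old physical page bits, keep the control bits. */
		val &= ~IOPTE_PAGE;
		/* Redirect the entry at the IOMMU's shared, zeroed dummy page. */
		val |= iommu->dummy_page_pa;

		iopte_val(*iopte) = val;
	}

Freed or never-used IOPTEs are left pointing at this dummy page rather than at stale DMA pages, which is what the IOPTE_IS_DUMMY() test above keys on.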
89 /* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
90 * facility it must all be done in one pass while under the iommu lock.
92 * On sun4u platforms, we only flush the IOMMU once every time we've passed
97 struct iommu *iommu, in iommu_range_alloc() argument
102 struct iommu_arena *arena = &iommu->arena; in iommu_range_alloc()
127 if (iommu->flush_all) in iommu_range_alloc()
128 iommu->flush_all(iommu); in iommu_range_alloc()
140 iommu->page_table_map_base >> IO_PAGE_SHIFT, in iommu_range_alloc()
146 if (iommu->flush_all) in iommu_range_alloc()
147 iommu->flush_all(iommu); in iommu_range_alloc()
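The allocator comment above warns that any caller using the 'handle' hint must do all of its allocations in one pass while holding iommu->lock. A hedged sketch of that caller pattern, mirroring the dma_4u_map_sg() hits further down (the helper name, the DMA_ERROR_CODE failure sentinel and the iommu_num_pages() call are assumptions not shown in this listing):

	/* Hypothetical caller: allocate IOMMU ranges for a scatterlist in one
	 * pass under iommu->lock, reusing the 'handle' hint between segments.
	 */
	static int example_map_sg_pass(struct device *dev, struct iommu *iommu,
				       struct scatterlist *sglist, int nelems)
	{
		unsigned long flags, handle = 0;
		struct scatterlist *sg;
		int i;

		spin_lock_irqsave(&iommu->lock, flags);
		for_each_sg(sglist, sg, nelems, i) {
			unsigned long npages, entry;

			npages = iommu_num_pages(sg->offset, sg->length, IO_PAGE_SIZE);
			entry = iommu_range_alloc(dev, iommu, npages, &handle);
			if (unlikely(entry == DMA_ERROR_CODE)) {
				/* Roll back this pass's allocations before unlocking. */
				spin_unlock_irqrestore(&iommu->lock, flags);
				return -ENOMEM;
			}
			/* ... program the IOPTEs for this segment ... */
		}
		spin_unlock_irqrestore(&iommu->lock, flags);
		return 0;
	}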
167 void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages) in iommu_range_free() argument
169 struct iommu_arena *arena = &iommu->arena; in iommu_range_free()
172 entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT; in iommu_range_free()
177 int iommu_table_init(struct iommu *iommu, int tsbsize, in iommu_table_init() argument
186 /* Setup initial software IOMMU state. */ in iommu_table_init()
187 spin_lock_init(&iommu->lock); in iommu_table_init()
188 iommu->ctx_lowest_free = 1; in iommu_table_init()
189 iommu->page_table_map_base = dma_offset; in iommu_table_init()
190 iommu->dma_addr_mask = dma_addr_mask; in iommu_table_init()
195 iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node); in iommu_table_init()
196 if (!iommu->arena.map) { in iommu_table_init()
197 printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n"); in iommu_table_init()
200 memset(iommu->arena.map, 0, sz); in iommu_table_init()
201 iommu->arena.limit = num_tsb_entries; in iommu_table_init()
204 iommu->flush_all = iommu_flushall; in iommu_table_init()
211 printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n"); in iommu_table_init()
214 iommu->dummy_page = (unsigned long) page_address(page); in iommu_table_init()
215 memset((void *)iommu->dummy_page, 0, PAGE_SIZE); in iommu_table_init()
216 iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); in iommu_table_init()
218 /* Now allocate and setup the IOMMU page table itself. */ in iommu_table_init()
222 printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n"); in iommu_table_init()
225 iommu->page_table = (iopte_t *)page_address(page); in iommu_table_init()
228 iopte_make_dummy(iommu, &iommu->page_table[i]); in iommu_table_init()
233 free_page(iommu->dummy_page); in iommu_table_init()
234 iommu->dummy_page = 0UL; in iommu_table_init()
237 kfree(iommu->arena.map); in iommu_table_init()
238 iommu->arena.map = NULL; in iommu_table_init()
243 static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, in alloc_npages() argument
248 entry = iommu_range_alloc(dev, iommu, npages, NULL); in alloc_npages()
252 return iommu->page_table + entry; in alloc_npages()
255 static int iommu_alloc_ctx(struct iommu *iommu) in iommu_alloc_ctx() argument
257 int lowest = iommu->ctx_lowest_free; in iommu_alloc_ctx()
258 int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); in iommu_alloc_ctx()
261 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); in iommu_alloc_ctx()
263 printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); in iommu_alloc_ctx()
268 __set_bit(n, iommu->ctx_bitmap); in iommu_alloc_ctx()
273 static inline void iommu_free_ctx(struct iommu *iommu, int ctx) in iommu_free_ctx() argument
276 __clear_bit(ctx, iommu->ctx_bitmap); in iommu_free_ctx()
277 if (ctx < iommu->ctx_lowest_free) in iommu_free_ctx()
278 iommu->ctx_lowest_free = ctx; in iommu_free_ctx()
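Taken together, the context-allocator hits above describe a two-pass circular scan of ctx_bitmap starting at ctx_lowest_free. A reconstruction sketch of iommu_alloc_ctx(), filling in the branches the search does not show (treat the exact failure handling as an assumption):

	static int iommu_alloc_ctx(struct iommu *iommu)
	{
		int lowest = iommu->ctx_lowest_free;
		int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

		if (unlikely(n == IOMMU_NUM_CTXS)) {
			/* Wrap around, skipping context 0 (the "no context" value). */
			n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
			if (unlikely(n == lowest)) {
				printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
				n = 0;
			}
		}
		if (n)
			__set_bit(n, iommu->ctx_bitmap);

		return n;
	}

iommu_free_ctx() is the mirror image: clear the bit and pull ctx_lowest_free back down so the next search starts no later than the freed context.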
286 struct iommu *iommu; in dma_4u_alloc_coherent() local
305 iommu = dev->archdata.iommu; in dma_4u_alloc_coherent()
307 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_alloc_coherent()
308 iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT); in dma_4u_alloc_coherent()
309 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_alloc_coherent()
316 *dma_addrp = (iommu->page_table_map_base + in dma_4u_alloc_coherent()
317 ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); in dma_4u_alloc_coherent()
335 struct iommu *iommu; in dma_4u_free_coherent() local
339 iommu = dev->archdata.iommu; in dma_4u_free_coherent()
341 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_free_coherent()
343 iommu_range_free(iommu, dvma, npages); in dma_4u_free_coherent()
345 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_free_coherent()
357 struct iommu *iommu; in dma_4u_map_page() local
365 iommu = dev->archdata.iommu; in dma_4u_map_page()
375 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_map_page()
376 base = alloc_npages(dev, iommu, npages); in dma_4u_map_page()
378 if (iommu->iommu_ctxflush) in dma_4u_map_page()
379 ctx = iommu_alloc_ctx(iommu); in dma_4u_map_page()
380 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_map_page()
385 bus_addr = (iommu->page_table_map_base + in dma_4u_map_page()
386 ((base - iommu->page_table) << IO_PAGE_SHIFT)); in dma_4u_map_page()
402 iommu_free_ctx(iommu, ctx); in dma_4u_map_page()
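Between alloc_npages() and the error path shown above, dma_4u_map_page() fills one IOPTE per IO page. A small sketch of that step, assuming the IOPTE_CONSISTENT()/IOPTE_STREAMING() protection helpers from asm/iommu.h (the helper function itself is hypothetical):

	/* Hypothetical helper: program npages IOPTEs starting at 'base' to map a
	 * region whose first IO page lives at physical address base_paddr.
	 */
	static void example_fill_ioptes(iopte_t *base, unsigned long base_paddr,
					unsigned long npages, unsigned long ctx,
					int streaming)
	{
		unsigned long prot = streaming ? IOPTE_STREAMING(ctx)
					       : IOPTE_CONSISTENT(ctx);
		unsigned long i;

		for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
			iopte_val(*base) = prot | base_paddr;
	}

The DMA address handed back to the caller is then the bus_addr computed at lines 385-386 plus the sub-page offset of the original buffer.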
409 static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, in strbuf_flush() argument
416 iommu->iommu_ctxflush) { in strbuf_flush()
459 (void) iommu_read(iommu->write_complete_reg); in strbuf_flush()
479 struct iommu *iommu; in dma_4u_unmap_page() local
490 iommu = dev->archdata.iommu; in dma_4u_unmap_page()
495 base = iommu->page_table + in dma_4u_unmap_page()
496 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); in dma_4u_unmap_page()
499 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_unmap_page()
503 if (iommu->iommu_ctxflush) in dma_4u_unmap_page()
508 strbuf_flush(strbuf, iommu, bus_addr, ctx, in dma_4u_unmap_page()
513 iopte_make_dummy(iommu, base + i); in dma_4u_unmap_page()
515 iommu_range_free(iommu, bus_addr, npages); in dma_4u_unmap_page()
517 iommu_free_ctx(iommu, ctx); in dma_4u_unmap_page()
519 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_unmap_page()
533 struct iommu *iommu; in dma_4u_map_sg() local
538 iommu = dev->archdata.iommu; in dma_4u_map_sg()
540 if (nelems == 0 || !iommu) in dma_4u_map_sg()
543 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_map_sg()
546 if (iommu->iommu_ctxflush) in dma_4u_map_sg()
547 ctx = iommu_alloc_ctx(iommu); in dma_4u_map_sg()
567 base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT; in dma_4u_map_sg()
578 /* Allocate iommu entries for that segment */ in dma_4u_map_sg()
581 entry = iommu_range_alloc(dev, iommu, npages, &handle); in dma_4u_map_sg()
586 printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" in dma_4u_map_sg()
587 " npages %lx\n", iommu, paddr, npages); in dma_4u_map_sg()
591 base = iommu->page_table + entry; in dma_4u_map_sg()
594 dma_addr = iommu->page_table_map_base + in dma_4u_map_sg()
635 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_map_sg()
654 iommu_range_free(iommu, vaddr, npages); in dma_4u_map_sg()
656 entry = (vaddr - iommu->page_table_map_base) in dma_4u_map_sg()
658 base = iommu->page_table + entry; in dma_4u_map_sg()
661 iopte_make_dummy(iommu, base + j); in dma_4u_map_sg()
669 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_map_sg()
677 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) in fetch_sg_ctx() argument
681 if (iommu->iommu_ctxflush) { in fetch_sg_ctx()
686 base = iommu->page_table + in fetch_sg_ctx()
687 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); in fetch_sg_ctx()
701 struct iommu *iommu; in dma_4u_unmap_sg() local
705 iommu = dev->archdata.iommu; in dma_4u_unmap_sg()
708 ctx = fetch_sg_ctx(iommu, sglist); in dma_4u_unmap_sg()
710 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_unmap_sg()
723 iommu_range_free(iommu, dma_handle, npages); in dma_4u_unmap_sg()
725 entry = ((dma_handle - iommu->page_table_map_base) in dma_4u_unmap_sg()
727 base = iommu->page_table + entry; in dma_4u_unmap_sg()
731 strbuf_flush(strbuf, iommu, dma_handle, ctx, in dma_4u_unmap_sg()
735 iopte_make_dummy(iommu, base + i); in dma_4u_unmap_sg()
740 iommu_free_ctx(iommu, ctx); in dma_4u_unmap_sg()
742 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_unmap_sg()
749 struct iommu *iommu; in dma_4u_sync_single_for_cpu() local
753 iommu = dev->archdata.iommu; in dma_4u_sync_single_for_cpu()
759 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_sync_single_for_cpu()
767 if (iommu->iommu_ctxflush && in dma_4u_sync_single_for_cpu()
771 iopte = iommu->page_table + in dma_4u_sync_single_for_cpu()
772 ((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT); in dma_4u_sync_single_for_cpu()
777 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); in dma_4u_sync_single_for_cpu()
779 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_sync_single_for_cpu()
786 struct iommu *iommu; in dma_4u_sync_sg_for_cpu() local
792 iommu = dev->archdata.iommu; in dma_4u_sync_sg_for_cpu()
798 spin_lock_irqsave(&iommu->lock, flags); in dma_4u_sync_sg_for_cpu()
802 if (iommu->iommu_ctxflush && in dma_4u_sync_sg_for_cpu()
806 iopte = iommu->page_table + in dma_4u_sync_sg_for_cpu()
807 ((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT); in dma_4u_sync_sg_for_cpu()
822 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); in dma_4u_sync_sg_for_cpu()
824 spin_unlock_irqrestore(&iommu->lock, flags); in dma_4u_sync_sg_for_cpu()
845 struct iommu *iommu = dev->archdata.iommu; in dma_supported() local
846 u64 dma_addr_mask = iommu->dma_addr_mask; in dma_supported()