Lines matching defs:mmu in ipu6-mmu.c (Intel IPU6 driver); each match is prefixed with its line number in that file.

29 #include "ipu6-mmu.h"

54 static void tlb_invalidate(struct ipu6_mmu *mmu)
59 spin_lock_irqsave(&mmu->ready_lock, flags);
60 if (!mmu->ready) {
61 spin_unlock_irqrestore(&mmu->ready_lock, flags);
65 for (i = 0; i < mmu->nr_mmus; i++) {
74 if (mmu->mmu_hw[i].insert_read_before_invalidate)
75 readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
77 writel(0xffffffff, mmu->mmu_hw[i].base +
87 spin_unlock_irqrestore(&mmu->ready_lock, flags);
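
A hedged reconstruction of tlb_invalidate() around the matches above: the
function bails out under ready_lock when the hardware has not been brought
up, then walks every MMU instance. The readl() of REG_L1_PHYS is a dummy
read issued only on instances flagged insert_read_before_invalidate (the
flag name suggests a hardware workaround), and the all-ones write hits the
invalidate register whose macro sits on the elided continuation line;
REG_TLB_INVALIDATE below is a stand-in for it.

static void tlb_invalidate(struct ipu6_mmu *mmu)
{
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&mmu->ready_lock, flags);
        if (!mmu->ready) {
                /* HW not initialised (or already cleaned up): nothing to flush */
                spin_unlock_irqrestore(&mmu->ready_lock, flags);
                return;
        }

        for (i = 0; i < mmu->nr_mmus; i++) {
                /* dummy read of the page-table base before invalidating,
                 * on instances that are flagged as needing it */
                if (mmu->mmu_hw[i].insert_read_before_invalidate)
                        readl(mmu->mmu_hw[i].base + REG_L1_PHYS);

                /* all-ones mask: invalidate every TLB entry
                 * (REG_TLB_INVALIDATE is a stand-in for the elided macro) */
                writel(0xffffffff, mmu->mmu_hw[i].base + REG_TLB_INVALIDATE);
        }
        spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
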
414 static int allocate_trash_buffer(struct ipu6_mmu *mmu)
424 iova = alloc_iova(&mmu->dmap->iovad, n_pages,
425 PHYS_PFN(mmu->dmap->mmu_info->aperture_end), 0);
427 dev_err(mmu->dev, "cannot allocate iova range for trash\n");
431 dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
433 if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
434 dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
439 mmu->pci_trash_page = dma;
443 * mmu->trash_page which is already reserved at the probe
447 ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
448 mmu->pci_trash_page, PAGE_SIZE);
450 dev_err(mmu->dev,
458 mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
459 dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
460 mmu->mmid, (unsigned int)mmu->iova_trash_page);
464 ipu6_mmu_unmap(mmu->dmap->mmu_info, PFN_PHYS(iova->pfn_lo),
466 dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
469 __free_iova(&mmu->dmap->iovad, iova);
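
allocate_trash_buffer() uses a common IOMMU trick: a single physical page
(mmu->trash_page, reserved at probe time per the comment above) is
DMA-mapped once and then mapped behind every page of a larger reserved
iova range, so any stray device access inside that range lands on a
harmless page instead of faulting. A minimal sketch of the core loop,
where n_pages (derived from the elided trash-range size) and the label
name are assumptions; the calls themselves all appear in the matches:

        /* n_pages and the out_unmap label are assumed context */
        iova_addr = iova->pfn_lo;
        for (i = 0; i < n_pages; i++) {
                /* every iova page points at the same DMA-mapped trash page */
                ret = ipu6_mmu_map(mmu->dmap->mmu_info, PFN_PHYS(iova_addr),
                                   mmu->pci_trash_page, PAGE_SIZE);
                if (ret) {
                        dev_err(mmu->dev,
                                "mapping trash buffer range failed\n");
                        goto out_unmap; /* unwind path shown above */
                }
                iova_addr++;
        }

        mmu->iova_trash_page = PFN_PHYS(iova->pfn_lo);
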
473 int ipu6_mmu_hw_init(struct ipu6_mmu *mmu)
479 mmu_info = mmu->dmap->mmu_info;
482 for (i = 0; i < mmu->nr_mmus; i++) {
483 struct ipu6_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
489 mmu->mmu_hw[i].base + REG_L1_PHYS);
492 writel(mmu->mmu_hw[i].info_bits,
493 mmu->mmu_hw[i].base + REG_INFO);
497 block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
499 dev_err(mmu->dev, "invalid L1 configuration\n");
510 block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
512 dev_err(mmu->dev, "invalid L2 configuration\n");
521 if (!mmu->trash_page) {
524 mmu->trash_page = alloc_page(GFP_KERNEL);
525 if (!mmu->trash_page) {
526 dev_err(mmu->dev, "insufficient memory for trash buffer\n");
530 ret = allocate_trash_buffer(mmu);
532 __free_page(mmu->trash_page);
533 mmu->trash_page = NULL;
534 dev_err(mmu->dev, "trash buffer allocation failed\n");
539 spin_lock_irqsave(&mmu->ready_lock, flags);
540 mmu->ready = true;
541 spin_unlock_irqrestore(&mmu->ready_lock, flags);
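
ipu6_mmu_hw_init() programs each MMU instance in three steps: the L1
page-table base (REG_L1_PHYS), the info bits (REG_INFO), and one block
start address per L1/L2 TLB stream, accumulating block_addr from the
per-stream block sizes and rejecting configurations that overflow. The
trash page and buffer are allocated lazily on first init, and only then
is mmu->ready raised under ready_lock, which is what arms
tlb_invalidate(). A sketch of one L1 stream-configuration pass, where
nr_l1streams, l1_stream_id_reg_offset and IPU6_MAX_L1_BLOCK_ADDR are
assumed names for elided fields; the L2 pass (lines 510/512 above)
follows the same pattern over l2_block_sz[]:

        /* nr_l1streams, l1_stream_id_reg_offset, IPU6_MAX_L1_BLOCK_ADDR:
         * assumed names; the stride and error path appear in the matches */
        for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
             block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
                if (block_addr > IPU6_MAX_L1_BLOCK_ADDR) {
                        dev_err(mmu->dev, "invalid L1 configuration\n");
                        return -EINVAL;
                }
                /* one block start address register per stream */
                writel(block_addr, mmu_hw->base +
                       mmu_hw->l1_stream_id_reg_offset + 4 * j);
        }
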
602 void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
606 spin_lock_irqsave(&mmu->ready_lock, flags);
607 mmu->ready = false;
608 spin_unlock_irqrestore(&mmu->ready_lock, flags);
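
The matches above are essentially the whole of ipu6_mmu_hw_cleanup(), the
mirror image of the end of hw_init. Clearing mmu->ready under the same
ready_lock that tlb_invalidate() checks guarantees no invalidate can race
with, or follow, the hardware teardown. A sketch:

void ipu6_mmu_hw_cleanup(struct ipu6_mmu *mmu)
{
        unsigned long flags;

        /* serialise against tlb_invalidate(), then disarm it */
        spin_lock_irqsave(&mmu->ready_lock, flags);
        mmu->ready = false;
        spin_unlock_irqrestore(&mmu->ready_lock, flags);
}
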
705 static void ipu6_mmu_destroy(struct ipu6_mmu *mmu)
707 struct ipu6_dma_mapping *dmap = mmu->dmap;
712 if (mmu->iova_trash_page) {
713 iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
720 dev_err(mmu->dev, "trash buffer iova not found.\n");
723 mmu->iova_trash_page = 0;
724 dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
726 mmu->pci_trash_page = 0;
727 __free_page(mmu->trash_page);
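
ipu6_mmu_destroy() unwinds allocate_trash_buffer() in reverse: look up
the reserved iova by the stored iova_trash_page address, tear down the
MMU mappings over the whole range, then release the DMA mapping and
finally the backing page. In the sketch below, the ipu6_mmu_unmap() and
__free_iova() calls mirror the allocation path and are assumptions; the
rest appears in the matches:

        if (mmu->iova_trash_page) {
                iova = find_iova(&dmap->iovad, PHYS_PFN(mmu->iova_trash_page));
                if (iova) {
                        /* assumed: unmap the whole range, then free the iova */
                        ipu6_mmu_unmap(mmu_info, PFN_PHYS(iova->pfn_lo),
                                       PFN_PHYS(iova_size(iova)));
                        __free_iova(&dmap->iovad, iova);
                } else {
                        dev_err(mmu->dev, "trash buffer iova not found.\n");
                }

                mmu->iova_trash_page = 0;
                dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                mmu->pci_trash_page = 0;
                __free_page(mmu->trash_page);
        }
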
754 struct ipu6_mmu *mmu;
776 mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
777 if (!mmu)
780 mmu->mmid = mmid;
781 mmu->mmu_hw = pdata->mmu_hw;
782 mmu->nr_mmus = hw->nr_mmus;
783 mmu->tlb_invalidate = tlb_invalidate;
784 mmu->ready = false;
785 INIT_LIST_HEAD(&mmu->vma_list);
786 spin_lock_init(&mmu->ready_lock);
788 mmu->dmap = alloc_dma_mapping(isp);
789 if (!mmu->dmap) {
794 return mmu;

797 void ipu6_mmu_cleanup(struct ipu6_mmu *mmu)
799 struct ipu6_dma_mapping *dmap = mmu->dmap;
801 ipu6_mmu_destroy(mmu);
802 mmu->dmap = NULL;
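
Taken together, the matched functions outline the lifecycle: a
constructor (its name and signature are elided; the devm_kzalloc() block
at lines 776-789 is its core) allocates the struct, records mmid, the
mmu_hw table and the tlb_invalidate callback, and attaches a DMA mapping;
ipu6_mmu_hw_init()/ipu6_mmu_hw_cleanup() bracket the hardware state via
mmu->ready; and ipu6_mmu_cleanup() calls ipu6_mmu_destroy() and drops
mmu->dmap. A hedged usage sketch, with the constructor name and arguments
as hypothetical stand-ins:

        /* ipu6_mmu_init() and its arguments are assumed, not confirmed */
        struct ipu6_mmu *mmu = ipu6_mmu_init(dev, base, mmid, hw);

        if (!IS_ERR_OR_NULL(mmu)) {
                ret = ipu6_mmu_hw_init(mmu);   /* program HW, set ->ready */

                /* ... mappings in use; tlb_invalidate() fires as needed ... */

                ipu6_mmu_hw_cleanup(mmu);      /* clear ->ready under lock */
                ipu6_mmu_cleanup(mmu);         /* destroy trash buffer, drop dmap */
        }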