Lines Matching full:iommu

(Each hit below carries its line number in the searched source file, evidently drivers/iommu/intel/svm.c judging by the includes and function names, followed by the enclosing function; "argument" and "local" flag hits on a function parameter or a local variable declaration.)

22 #include "iommu.h"
25 #include "../iommu-sva.h"
64 int intel_svm_enable_prq(struct intel_iommu *iommu) in intel_svm_enable_prq() argument
72 pr_warn("IOMMU: %s: Failed to allocate page request queue\n", in intel_svm_enable_prq()
73 iommu->name); in intel_svm_enable_prq()
76 iommu->prq = page_address(pages); in intel_svm_enable_prq()
78 irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); in intel_svm_enable_prq()
80 pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n", in intel_svm_enable_prq()
81 iommu->name); in intel_svm_enable_prq()
85 iommu->pr_irq = irq; in intel_svm_enable_prq()
87 snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), in intel_svm_enable_prq()
88 "dmar%d-iopfq", iommu->seq_id); in intel_svm_enable_prq()
89 iopfq = iopf_queue_alloc(iommu->iopfq_name); in intel_svm_enable_prq()
91 pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name); in intel_svm_enable_prq()
95 iommu->iopf_queue = iopfq; in intel_svm_enable_prq()
97 snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id); in intel_svm_enable_prq()
100 iommu->prq_name, iommu); in intel_svm_enable_prq()
102 pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", in intel_svm_enable_prq()
103 iommu->name); in intel_svm_enable_prq()
106 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_enable_prq()
107 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_enable_prq()
108 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
110 init_completion(&iommu->prq_complete); in intel_svm_enable_prq()
115 iopf_queue_free(iommu->iopf_queue); in intel_svm_enable_prq()
116 iommu->iopf_queue = NULL; in intel_svm_enable_prq()
119 iommu->pr_irq = 0; in intel_svm_enable_prq()
121 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
122 iommu->prq = NULL; in intel_svm_enable_prq()
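Read in order, the hits in intel_svm_enable_prq() imply a strict allocate-then-arm sequence. A condensed sketch, assuming the alloc_pages_node()/request_threaded_irq() calls and the error-label names that fall between the matched lines (they are not quotes of the file):

static int prq_enable_sketch(struct intel_iommu *iommu)
{
        struct iopf_queue *iopfq;
        struct page *pages;
        int irq, ret;

        /* Zeroed, node-local backing pages for the page request ring. */
        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages)
                return -ENOMEM;
        iommu->prq = page_address(pages);

        /* A dedicated PRQ interrupt vector for this IOMMU unit. */
        irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id,
                               iommu->node, iommu);
        if (irq <= 0) {
                ret = -EINVAL;
                goto free_prq;
        }
        iommu->pr_irq = irq;

        /* One I/O page fault queue per IOMMU, named after its DMAR unit. */
        snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
                 "dmar%d-iopfq", iommu->seq_id);
        iopfq = iopf_queue_alloc(iommu->iopfq_name);
        if (!iopfq) {
                ret = -ENOMEM;
                goto free_hwirq;
        }
        iommu->iopf_queue = iopfq;

        /* Threaded handler: PRQ descriptors are parsed in process context. */
        snprintf(iommu->prq_name, sizeof(iommu->prq_name),
                 "dmar%d-prq", iommu->seq_id);
        ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
                                   iommu->prq_name, iommu);
        if (ret)
                goto free_iopfq;

        /* Only now is the hardware pointed at the (still empty) ring. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG,
                    virt_to_phys(iommu->prq) | PRQ_ORDER);

        init_completion(&iommu->prq_complete);
        return 0;

free_iopfq:
        iopf_queue_free(iommu->iopf_queue);
        iommu->iopf_queue = NULL;
free_hwirq:
        dmar_free_hwirq(irq);
        iommu->pr_irq = 0;
free_prq:
        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
        return ret;
}

Programming DMAR_PQA_REG last matters: once the queue address is live the hardware may post requests, so the interrupt handler and the iopf queue must already be in place.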
127 int intel_svm_finish_prq(struct intel_iommu *iommu) in intel_svm_finish_prq() argument
129 dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); in intel_svm_finish_prq()
130 dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); in intel_svm_finish_prq()
131 dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); in intel_svm_finish_prq()
133 if (iommu->pr_irq) { in intel_svm_finish_prq()
134 free_irq(iommu->pr_irq, iommu); in intel_svm_finish_prq()
135 dmar_free_hwirq(iommu->pr_irq); in intel_svm_finish_prq()
136 iommu->pr_irq = 0; in intel_svm_finish_prq()
139 if (iommu->iopf_queue) { in intel_svm_finish_prq()
140 iopf_queue_free(iommu->iopf_queue); in intel_svm_finish_prq()
141 iommu->iopf_queue = NULL; in intel_svm_finish_prq()
144 free_pages((unsigned long)iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
145 iommu->prq = NULL; in intel_svm_finish_prq()
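The teardown hits are nearly complete on their own; this sketch only restores the brace structure. The order is the reverse of setup, and the hardware is pointed away from the ring before anything is freed:

static void prq_finish_sketch(struct intel_iommu *iommu)
{
        /* Quiesce the hardware first: no new requests can be posted. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
        dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

        if (iommu->pr_irq) {
                free_irq(iommu->pr_irq, iommu);
                dmar_free_hwirq(iommu->pr_irq);
                iommu->pr_irq = 0;
        }

        if (iommu->iopf_queue) {
                iopf_queue_free(iommu->iopf_queue);
                iommu->iopf_queue = NULL;
        }

        free_pages((unsigned long)iommu->prq, PRQ_ORDER);
        iommu->prq = NULL;
}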
150 void intel_svm_check(struct intel_iommu *iommu) in intel_svm_check() argument
152 if (!pasid_supported(iommu)) in intel_svm_check()
156 !cap_fl1gp_support(iommu->cap)) { in intel_svm_check()
158 iommu->name); in intel_svm_check()
163 !cap_fl5lp_support(iommu->cap)) { in intel_svm_check()
165 iommu->name); in intel_svm_check()
169 iommu->flags |= VTD_FLAG_SVM_CAPABLE; in intel_svm_check()
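intel_svm_check() pairs each first-level paging capability of the IOMMU with the CPU feature it must match before advertising SVM. The CPU-feature half of each condition is not among the hits; the cpu_feature_enabled() tests below are assumptions consistent with the cap_fl1gp/cap_fl5lp names, and the warning strings are elided:

static void svm_check_sketch(struct intel_iommu *iommu)
{
        if (!pasid_supported(iommu))
                return;

        /* CPU may use 1GB pages the IOMMU cannot walk: no SVM. */
        if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
            !cap_fl1gp_support(iommu->cap))
                return;

        /* CPU runs 5-level paging but the IOMMU is 4-level only: no SVM. */
        if (cpu_feature_enabled(X86_FEATURE_LA57) &&
            !cap_fl5lp_support(iommu->cap))
                return;

        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}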
182 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih); in __flush_svm_range_dev()
184 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in __flush_svm_range_dev()
228 qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0); in intel_flush_svm_all()
230 qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, in intel_flush_svm_all()
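Both flush helpers above follow the same two-stage pattern: invalidate the PASID-tagged entries cached in the IOMMU, then, if the device caches translations via ATS, its on-device TLB as well. A sketch of that pattern; the dev_iommu_priv_get() lookup and the ats_enabled/ats_qdep field names are assumptions about the surrounding code:

static void svm_flush_sketch(struct intel_svm_dev *sdev, struct intel_svm *svm,
                             unsigned long address, unsigned long pages, bool ih)
{
        struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);

        /* Stage 1: IOMMU-side PASID-tagged IOTLB. */
        qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);

        /* Stage 2: device-side ATS translation cache, if enabled. */
        if (info->ats_enabled)
                qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
                                         svm->pasid, info->ats_qdep, address,
                                         order_base_2(pages));
}

intel_flush_svm_all() (line 228) is the degenerate case: address 0 with pages = -1UL flushes the entire address space for the PASID.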
275 intel_pasid_tear_down_entry(sdev->iommu, sdev->dev, in intel_mm_release()
318 static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev, in intel_svm_bind_mm() argument
360 sdev->iommu = iommu; in intel_svm_bind_mm()
372 ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid, in intel_svm_bind_mm()
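intel_svm_bind_mm() attaches a process address space to a PASID on one device. A skeleton around the two matched statements; the notifier and list handling are omitted, and FLPT_DEFAULT_DID plus the PASID_FLAG_FL5LP selection are assumptions about the unmatched parts:

static int bind_mm_sketch(struct intel_iommu *iommu, struct device *dev,
                          struct mm_struct *mm, u32 pasid)
{
        struct intel_svm_dev *sdev;
        int ret;

        sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
        if (!sdev)
                return -ENOMEM;

        sdev->dev = dev;
        sdev->iommu = iommu;            /* matched line 360 */

        /*
         * Point the first-level (scalar mode) PASID table entry at the
         * CPU page tables: the device now walks the same tables as the
         * CPU, which is the essence of shared virtual addressing.
         */
        ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
                                            FLPT_DEFAULT_DID,
                                            cpu_feature_enabled(X86_FEATURE_LA57) ?
                                            PASID_FLAG_FL5LP : 0);
        if (ret)
                kfree(sdev);

        return ret;
}

The symmetric teardown is visible at line 275: when the mm goes away, intel_mm_release() clears the entry with intel_pasid_tear_down_entry().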
479 struct intel_iommu *iommu; in intel_drain_pasid_prq() local
493 iommu = info->iommu; in intel_drain_pasid_prq()
497 did = domain_id_iommu(domain, iommu); in intel_drain_pasid_prq()
505 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
506 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
507 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in intel_drain_pasid_prq()
511 req = &iommu->prq[head / sizeof(*req)]; in intel_drain_pasid_prq()
517 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
541 reinit_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
542 qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN); in intel_drain_pasid_prq()
543 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in intel_drain_pasid_prq()
544 wait_for_completion(&iommu->prq_complete); in intel_drain_pasid_prq()
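intel_drain_pasid_prq() must guarantee that no page request for a dying PASID is still in flight before the PASID entry is reused. The hits outline a two-phase algorithm; the sketch below fills in the ring walk and the descriptor encoding (the QI_* field choices mirror the driver's macros for VT-d spec ch. 7.10 and should be treated as assumptions):

static void drain_prq_sketch(struct intel_iommu *iommu, u16 sid, u32 pasid,
                             u16 did, u16 qdep, u16 pfsid)
{
        struct qi_desc desc[3];
        u64 head, tail;

        /*
         * Phase 1: loop until no request for this sid/pasid sits in the
         * hardware ring between head and tail. The PRQ thread signals
         * prq_complete each time it finishes a pass over the ring.
         */
prq_retry:
        reinit_completion(&iommu->prq_complete);
        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
                struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

                if (req->rid == sid && req->pasid == pasid) {
                        wait_for_completion(&iommu->prq_complete);
                        goto prq_retry;
                }
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }

        /*
         * Phase 2: a fenced invalidation-wait plus PASID-IOTLB and
         * device-TLB flushes, submitted with QI_OPT_WAIT_DRAIN so the
         * queued-invalidation hardware drains outstanding responses.
         */
        memset(desc, 0, sizeof(desc));
        desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) | QI_IWD_FENCE | QI_IWD_TYPE;
        desc[1].qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
                      QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
        desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
                      QI_DEV_EIOTLB_QDEP(qdep) | QI_DEV_IOTLB_PFSID(pfsid) |
                      QI_DEIOTLB_TYPE;
qi_retry:
        reinit_completion(&iommu->prq_complete);
        qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
                /* Overflow raced with the drain: wait and redo it. */
                wait_for_completion(&iommu->prq_complete);
                goto qi_retry;
        }
}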
565 static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, in intel_svm_prq_report() argument
598 } else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) { in intel_svm_prq_report()
609 static void handle_bad_prq_event(struct intel_iommu *iommu, in handle_bad_prq_event() argument
615 iommu->name, ((unsigned long long *)req)[0], in handle_bad_prq_event()
645 qi_submit_sync(iommu, &desc, 1, 0); in handle_bad_prq_event()
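handle_bad_prq_event() logs the raw descriptor and, when the request closes a page group (so the device is waiting for an answer), sends a page group response carrying the failure code. A sketch of the response build; the page_req_dsc field names and QI_PGRP_* layout are assumptions taken from the driver's headers:

static void bad_prq_sketch(struct intel_iommu *iommu,
                           struct page_req_dsc *req, int result)
{
        struct qi_desc desc;

        pr_err("%s: invalid page request: %08llx %08llx\n",
               iommu->name, ((unsigned long long *)req)[0],
               ((unsigned long long *)req)[1]);

        /* No response is owed unless this is the last request of a group. */
        if (!req->lpig)
                return;

        desc.qw0 = QI_PGRP_PASID(req->pasid) |
                   QI_PGRP_DID(req->rid) |
                   QI_PGRP_PASID_P(req->pasid_present) |
                   QI_PGRP_RESP_CODE(result) |
                   QI_PGRP_RESP_TYPE;
        desc.qw1 = QI_PGRP_IDX(req->prg_index) | QI_PGRP_LPIG(req->lpig);
        desc.qw2 = 0;
        desc.qw3 = 0;

        qi_submit_sync(iommu, &desc, 1, 0);
}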
650 struct intel_iommu *iommu = d; in prq_event_thread() local
660 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
662 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
663 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
666 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
670 pr_err("IOMMU: %s: Page request without PASID\n", in prq_event_thread()
671 iommu->name); in prq_event_thread()
673 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
678 pr_err("IOMMU: %s: Address is not canonical\n", in prq_event_thread()
679 iommu->name); in prq_event_thread()
684 pr_err("IOMMU: %s: Page request in Privilege Mode\n", in prq_event_thread()
685 iommu->name); in prq_event_thread()
690 pr_err("IOMMU: %s: Execution request not supported\n", in prq_event_thread()
691 iommu->name); in prq_event_thread()
699 pdev = pci_get_domain_bus_and_slot(iommu->segment, in prq_event_thread()
703 * If prq is to be handled outside iommu driver via receiver of in prq_event_thread()
709 if (intel_svm_prq_report(iommu, &pdev->dev, req)) in prq_event_thread()
710 handle_bad_prq_event(iommu, req, QI_RESP_INVALID); in prq_event_thread()
712 trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1, in prq_event_thread()
714 iommu->prq_seq_number++); in prq_event_thread()
720 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); in prq_event_thread()
726 if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) { in prq_event_thread()
727 pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n", in prq_event_thread()
728 iommu->name); in prq_event_thread()
729 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; in prq_event_thread()
730 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; in prq_event_thread()
732 iopf_queue_discard_partial(iommu->iopf_queue); in prq_event_thread()
733 writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG); in prq_event_thread()
734 pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared\n", in prq_event_thread()
735 iommu->name); in prq_event_thread()
739 if (!completion_done(&iommu->prq_complete)) in prq_event_thread()
740 complete(&iommu->prq_complete); in prq_event_thread()
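The hits in prq_event_thread() spell out the whole event loop. A condensed skeleton, with the per-request validation collapsed into one branch and the reporting path elided:

static irqreturn_t prq_thread_sketch(int irq, void *d)
{
        struct intel_iommu *iommu = d;
        u64 head, tail;

        /*
         * Ack "pending" before draining, so a request that arrives while
         * the ring is being walked re-raises the interrupt later.
         */
        writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

        tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
        head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
        while (head != tail) {
                struct page_req_dsc *req = &iommu->prq[head / sizeof(*req)];

                /*
                 * Checks from lines 670-691: a request must carry a
                 * PASID, a canonical address, no privilege-mode access
                 * and no execute access, or it is answered with
                 * QI_RESP_INVALID.
                 */
                if (!req->pasid_present) {
                        handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
                        goto next;
                }
                /* ... hand the fault to the iopf layer (line 709) ... */
next:
                head = (head + sizeof(*req)) & PRQ_RING_MASK;
        }
        /* Return the consumed entries to hardware: head catches up. */
        dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

        /*
         * Overflow handling (lines 726-735): clear PRO only once the
         * ring is empty, and throw away partially collected groups.
         */
        if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
                head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
                tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
                if (head == tail) {
                        iopf_queue_discard_partial(iommu->iopf_queue);
                        writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
                }
        }

        /* Wake anyone in intel_drain_pasid_prq() waiting on this pass. */
        if (!completion_done(&iommu->prq_complete))
                complete(&iommu->prq_complete);

        return IRQ_HANDLED;
}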
750 struct intel_iommu *iommu = info->iommu; in intel_svm_page_response() local
797 dmar_latency_update(iommu, DMAR_LATENCY_PRQ, in intel_svm_page_response()
801 qi_submit_sync(iommu, &desc, 1, 0); in intel_svm_page_response()
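Lines 598 and 797 are the two ends of the PRQ latency instrumentation: a timestamp is taken when a request is reported, and the delta is fed to the DMAR perf counters when the response is submitted. A sketch of that bookkeeping, with 'cookie' standing in for wherever the driver stashes the timestamp (an assumption):

static void prq_latency_sketch(struct intel_iommu *iommu, u64 *cookie,
                               bool reporting)
{
        if (!dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ))
                return;

        if (reporting)          /* request handed to the fault layer */
                *cookie = ktime_to_ns(ktime_get());
        else                    /* response submitted to hardware */
                dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
                                    ktime_to_ns(ktime_get()) - *cookie);
}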
811 struct intel_iommu *iommu = info->iommu; in intel_svm_set_dev_pasid() local
813 return intel_svm_bind_mm(iommu, dev, domain, pasid); in intel_svm_set_dev_pasid()
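intel_svm_set_dev_pasid() is a thin adapter: it pulls the IOMMU out of the per-device info and forwards to intel_svm_bind_mm(). It is the callback through which the core IOMMU layer attaches an SVA domain to a PASID; a sketch of the usual ops wiring (the table name and member set are illustrative, not a quote of the file):

static const struct iommu_domain_ops svm_domain_ops_sketch = {
        .set_dev_pasid  = intel_svm_set_dev_pasid,
};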