// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-sva.h"

static DEFINE_MUTEX(iommu_sva_lock);

/* Allocate an iommu_mm_data and a global PASID for the mm, if not already done */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	INIT_LIST_HEAD(&iommu_mm->sva_handles);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. If it is, readers may
	 * see a valid iommu_mm with uninitialized values.
	 */
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}
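
/*
 * The release store above pairs with a lock-free reader that loads
 * mm->iommu_mm without holding iommu_sva_lock. A minimal sketch of such a
 * reader, modelled on mm_get_enqcmd_pasid() in <linux/iommu.h> (the exact
 * helper may differ by kernel version):
 *
 *	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
 *
 *	if (!iommu_mm)
 *		return IOMMU_PASID_INVALID;
 *	return iommu_mm->pasid;
 *
 * The address dependency on the iommu_mm pointer orders the reads of its
 * fields after the READ_ONCE(), so a non-NULL pointer implies that the
 * fields written before smp_store_release() are visible.
 */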

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
		if (handle->dev == dev) {
			refcount_inc(&handle->users);
			mutex_unlock(&iommu_sva_lock);
			return handle;
		}
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (!domain) {
		ret = -ENOMEM;
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
	if (ret)
		goto out_free_domain;
	domain->users = 1;
	list_add(&domain->next, &mm->iommu_mm->sva_domains);

out:
	refcount_set(&handle->users, 1);
	list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	handle->domain = domain;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
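
/*
 * Typical driver-side usage (a sketch, not taken from this file; error
 * handling is trimmed and the device-programming step is hypothetical):
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *	int ret;
 *
 *	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	if (ret)
 *		return ret;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	// program pasid into the device so that DMA tagged with it is
 *	// translated through the CPU page tables of current->mm
 *	...
 *	iommu_sva_unbind_device(handle);
 */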

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transactions for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}
	list_del(&handle->handle_item);

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}
	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
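
/*
 * The quiesce requirement in the comment above is the caller's job. A
 * sketch of the expected teardown ordering (the my_dev_* helpers are
 * hypothetical, device-specific stand-ins):
 *
 *	my_dev_stop_dma_for_pasid(mdev, pasid);	// issue no new transactions
 *	my_dev_drain_page_requests(mdev);	// flush outstanding PRI
 *	iommu_sva_unbind_device(handle);	// only then drop the bond
 */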

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

/*
 * I/O page fault handler for SVA
 */
enum iommu_page_response_code
iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm = data;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}
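
/*
 * This handler is not called by drivers directly; the core installs it on
 * the SVA domain at allocation time, roughly as below (see
 * iommu_sva_domain_alloc(); exact field names may vary by kernel version):
 *
 *	domain->type = IOMMU_DOMAIN_SVA;
 *	domain->mm = mm;
 *	domain->iopf_handler = iommu_sva_handle_iopf;
 *	domain->fault_data = mm;
 *
 * so each page request resolved against the domain faults directly into
 * the bound mm via handle_mm_fault() above.
 */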

void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}
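
/*
 * Call-site sketch: mm_pasid_drop() is invoked from the final-mmdrop path
 * (__mmdrop() in kernel/fork.c in this kernel generation), roughly:
 *
 *	static void __mmdrop(struct mm_struct *mm)
 *	{
 *		...
 *		mm_pasid_drop(mm);
 *		...
 *	}
 *
 * At that point no user of the mm remains, so the PASID can be returned to
 * the global allocator without taking iommu_sva_lock.
 */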