/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 */
#ifndef __IO_PAGETABLE_H
#define __IO_PAGETABLE_H

#include <linux/interval_tree.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

#include "iommufd_private.h"

struct iommu_domain;

/*
 * Each io_pagetable is composed of intervals of areas which cover regions of
 * the iova that are backed by something. iova not covered by areas is not
 * populated in the page table. Each area is fully populated with pages.
 *
 * iovas are in byte units, but must be iopt->iova_alignment aligned.
 *
 * pages can be NULL; this means some other thread is still working on setting
 * up or tearing down the area. When observed under the write side of the
 * domains_rwsem a NULL pages must mean the area is still being set up and no
 * domains are filled.
 *
 * storage_domain points at an arbitrary iommu_domain that is holding the PFNs
 * for this area. It is locked by the pages->mutex. This simplifies the locking
 * as the pages code can rely on the storage_domain without having to get the
 * iopt->domains_rwsem.
 *
 * The io_pagetable::iova_rwsem protects node
 * The iopt_pages::mutex protects pages_node
 * iopt and iommu_prot are immutable
 * The pages::mutex protects num_accesses
 */
struct iopt_area {
	struct interval_tree_node node;
	struct interval_tree_node pages_node;
	struct io_pagetable *iopt;
	struct iopt_pages *pages;
	struct iommu_domain *storage_domain;
	/* How many bytes into the first page the area starts */
	unsigned int page_offset;
	/* IOMMU_READ, IOMMU_WRITE, etc */
	int iommu_prot;
	bool prevent_access : 1;
	unsigned int num_accesses;
	unsigned int num_locks;
};
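
/*
 * Illustrative sketch (editorial, not part of the iommufd implementation): an
 * area ties an inclusive iova interval in iopt->area_itree to an inclusive
 * page-index interval in pages->domains_itree. With hypothetical values and a
 * 4 KiB PAGE_SIZE, an area at iova [0x3000, 0x5fff] that begins at page index
 * 2 of its iopt_pages (page_offset == 0) covers page indexes [2, 4]:
 *
 *	area->node.start       = 0x3000;
 *	area->node.last        = 0x5fff;
 *	area->pages_node.start = 2;
 *	area->pages_node.last  = 4;
 */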

struct iopt_allowed {
	struct interval_tree_node node;
};

struct iopt_reserved {
	struct interval_tree_node node;
	void *owner;
};

int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages);
void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages);

int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain);
void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
			     struct iommu_domain *domain);
void iopt_area_unmap_domain(struct iopt_area *area,
			    struct iommu_domain *domain);

static inline unsigned long iopt_area_index(struct iopt_area *area)
{
	return area->pages_node.start;
}

static inline unsigned long iopt_area_last_index(struct iopt_area *area)
{
	return area->pages_node.last;
}

static inline unsigned long iopt_area_iova(struct iopt_area *area)
{
	return area->node.start;
}

static inline unsigned long iopt_area_last_iova(struct iopt_area *area)
{
	return area->node.last;
}

static inline size_t iopt_area_length(struct iopt_area *area)
{
	return (area->node.last - area->node.start) + 1;
}
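
/*
 * Note (editorial example, hypothetical values): node.last is inclusive, so
 * an area with node.start == 0x1000 and node.last == 0x1fff has
 * iopt_area_last_iova() == 0x1fff and iopt_area_length() == 0x1000.
 */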

/*
 * Number of bytes from the start of the iopt_pages that the iova begins.
 * iopt_area_start_byte() / PAGE_SIZE encodes the starting page index
 * iopt_area_start_byte() % PAGE_SIZE encodes the offset within that page
 */
static inline unsigned long iopt_area_start_byte(struct iopt_area *area,
						 unsigned long iova)
{
	if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
		WARN_ON(iova < iopt_area_iova(area) ||
			iova > iopt_area_last_iova(area));
	return (iova - iopt_area_iova(area)) + area->page_offset +
	       iopt_area_index(area) * PAGE_SIZE;
}

static inline unsigned long iopt_area_iova_to_index(struct iopt_area *area,
						    unsigned long iova)
{
	return iopt_area_start_byte(area, iova) / PAGE_SIZE;
}
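
/*
 * Worked example (editorial, hypothetical values, assuming a 4 KiB
 * PAGE_SIZE): for an area whose first pinned page is pages index 2
 * (iopt_area_index() == 2) and which starts 0x100 bytes into that page
 * (page_offset == 0x100), the iova one byte past the area start gives:
 *
 *	start_byte = 1 + 0x100 + 2 * PAGE_SIZE
 *	start_byte / PAGE_SIZE == 2      (same page as the area start)
 *	start_byte % PAGE_SIZE == 0x101  (offset within that page)
 */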

#define __make_iopt_iter(name)                                                 \
	static inline struct iopt_##name *iopt_##name##_iter_first(           \
		struct io_pagetable *iopt, unsigned long start,                \
		unsigned long last)                                            \
	{                                                                      \
		struct interval_tree_node *node;                               \
									       \
		lockdep_assert_held(&iopt->iova_rwsem);                        \
		node = interval_tree_iter_first(&iopt->name##_itree, start,    \
						last);                         \
		if (!node)                                                     \
			return NULL;                                           \
		return container_of(node, struct iopt_##name, node);          \
	}                                                                      \
	static inline struct iopt_##name *iopt_##name##_iter_next(            \
		struct iopt_##name *last_node, unsigned long start,            \
		unsigned long last)                                            \
	{                                                                      \
		struct interval_tree_node *node;                               \
									       \
		node = interval_tree_iter_next(&last_node->node, start, last); \
		if (!node)                                                     \
			return NULL;                                           \
		return container_of(node, struct iopt_##name, node);          \
	}

__make_iopt_iter(area)
__make_iopt_iter(allowed)
__make_iopt_iter(reserved)
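
/*
 * Usage sketch (editorial, not part of the iommufd implementation): walking
 * every area that intersects an inclusive [start, last] range. The generated
 * *_iter_first() helpers assert that iova_rwsem is held, so take it first;
 * handle_area() is a hypothetical callback.
 *
 *	struct iopt_area *area;
 *
 *	down_read(&iopt->iova_rwsem);
 *	for (area = iopt_area_iter_first(iopt, start, last); area;
 *	     area = iopt_area_iter_next(area, start, last))
 *		handle_area(area);
 *	up_read(&iopt->iova_rwsem);
 */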

struct iopt_area_contig_iter {
	unsigned long cur_iova;
	unsigned long last_iova;
	struct iopt_area *area;
};
struct iopt_area *iopt_area_contig_init(struct iopt_area_contig_iter *iter,
					struct io_pagetable *iopt,
					unsigned long iova,
					unsigned long last_iova);
struct iopt_area *iopt_area_contig_next(struct iopt_area_contig_iter *iter);

static inline bool iopt_area_contig_done(struct iopt_area_contig_iter *iter)
{
	return iter->area && iter->last_iova <= iopt_area_last_iova(iter->area);
}

/*
 * Iterate over a contiguous list of areas that span the iova,last_iova range.
 * The caller must check iopt_area_contig_done() after the loop to see if
 * contiguous areas existed.
 */
#define iopt_for_each_contig_area(iter, area, iopt, iova, last_iova)          \
	for (area = iopt_area_contig_init(iter, iopt, iova, last_iova); area; \
	     area = iopt_area_contig_next(iter))
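
/*
 * Usage sketch (editorial): covering an inclusive [iova, last_iova] range and
 * failing if any part of it is not backed by an area. handle_slice() and the
 * error code are illustrative.
 *
 *	struct iopt_area_contig_iter iter;
 *	struct iopt_area *area;
 *	int rc = 0;
 *
 *	down_read(&iopt->iova_rwsem);
 *	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
 *		handle_slice(area, iter.cur_iova, last_iova);
 *	if (!iopt_area_contig_done(&iter))
 *		rc = -ENOENT;
 *	up_read(&iopt->iova_rwsem);
 */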

enum {
	IOPT_PAGES_ACCOUNT_NONE = 0,
	IOPT_PAGES_ACCOUNT_USER = 1,
	IOPT_PAGES_ACCOUNT_MM = 2,
	IOPT_PAGES_ACCOUNT_MODE_NUM = 3,
};

enum iopt_address_type {
	IOPT_ADDRESS_USER = 0,
	IOPT_ADDRESS_FILE = 1,
};

/*
 * This holds a pinned page list for multiple areas of IO address space. The
 * pages always originate from a linear chunk of userspace VA. Multiple
 * io_pagetable's, through their iopt_area's, can share a single iopt_pages
 * which avoids multi-pinning and double accounting of page consumption.
 *
 * indexes in this structure are measured in PAGE_SIZE units, are 0 based from
 * the start of the uptr and extend to npages. pages are pinned dynamically
 * according to the intervals in the access_itree and domains_itree, npinned
 * records the current number of pages pinned.
 */
struct iopt_pages {
	struct kref kref;
	struct mutex mutex;
	size_t npages;
	size_t npinned;
	size_t last_npinned;
	struct task_struct *source_task;
	struct mm_struct *source_mm;
	struct user_struct *source_user;
	enum iopt_address_type type;
	union {
		void __user *uptr;	/* IOPT_ADDRESS_USER */
		struct {		/* IOPT_ADDRESS_FILE */
			struct file *file;
			unsigned long start;
		};
	};
	bool writable:1;
	u8 account_mode;

	struct xarray pinned_pfns;
	/* Of iopt_pages_access::node */
	struct rb_root_cached access_itree;
	/* Of iopt_area::pages_node */
	struct rb_root_cached domains_itree;
};

struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
					 unsigned long length, bool writable);
struct iopt_pages *iopt_alloc_file_pages(struct file *file, unsigned long start,
					 unsigned long length, bool writable);
void iopt_release_pages(struct kref *kref);
static inline void iopt_put_pages(struct iopt_pages *pages)
{
	kref_put(&pages->kref, iopt_release_pages);
}
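
/*
 * Lifetime sketch (editorial): iopt_pages is refcounted via kref. A caller
 * that stores an extra reference takes it with kref_get() and drops it with
 * iopt_put_pages(); the final put releases everything through
 * iopt_release_pages().
 *
 *	kref_get(&pages->kref);
 *	...
 *	iopt_put_pages(pages);
 */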

void iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start,
				 unsigned long last, struct page **out_pages);
int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start,
			   unsigned long last, struct page **out_pages);
void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
			      unsigned long last);

int iopt_area_add_access(struct iopt_area *area, unsigned long start,
			 unsigned long last, struct page **out_pages,
			 unsigned int flags, bool lock_area);
void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
			     unsigned long last, bool unlock_area);
int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
			 void *data, unsigned long length, unsigned int flags);

/*
 * Each interval represents an active iopt_access_pages(); it acts as an
 * interval lock that keeps the PFNs pinned and stored in the xarray.
 */
struct iopt_pages_access {
	struct interval_tree_node node;
	unsigned int users;
};
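
/*
 * Access sketch (editorial): pinning the pages behind an iova range of an
 * area and releasing them again. The index conversion uses the helpers above;
 * the flags value and the lock_area/unlock_area choice are illustrative.
 *
 *	unsigned long start = iopt_area_iova_to_index(area, iova);
 *	unsigned long last = iopt_area_iova_to_index(area, last_iova);
 *	int rc;
 *
 *	rc = iopt_area_add_access(area, start, last, out_pages, flags, false);
 *	if (rc)
 *		return rc;
 *	... use out_pages ...
 *	iopt_area_remove_access(area, start, last, false);
 */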

struct pfn_reader_user;

int iopt_pages_update_pinned(struct iopt_pages *pages, unsigned long npages,
			     bool inc, struct pfn_reader_user *user);

#endif