1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2021 Intel Corporation
4  * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
5  */
6 #ifndef __LINUX_IOMMUFD_H
7 #define __LINUX_IOMMUFD_H
8 
9 #include <linux/err.h>
10 #include <linux/errno.h>
11 #include <linux/iommu.h>
12 #include <linux/refcount.h>
13 #include <linux/types.h>
14 #include <linux/xarray.h>
15 #include <uapi/linux/iommufd.h>
16 
17 struct device;
18 struct file;
19 struct iommu_group;
20 struct iommu_user_data;
21 struct iommu_user_data_array;
22 struct iommufd_access;
23 struct iommufd_ctx;
24 struct iommufd_device;
25 struct iommufd_viommu_ops;
26 struct page;
27 
/*
 * Types of objects that carry a userspace-visible ID handle.
 * IOMMUFD_OBJ_ANY aliases IOMMUFD_OBJ_NONE so a lookup may match any type.
 */
enum iommufd_object_type {
	IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_DEVICE,
	IOMMUFD_OBJ_HWPT_PAGING,
	IOMMUFD_OBJ_HWPT_NESTED,
	IOMMUFD_OBJ_IOAS,
	IOMMUFD_OBJ_ACCESS,
	IOMMUFD_OBJ_FAULT,
	IOMMUFD_OBJ_VIOMMU,
	IOMMUFD_OBJ_VDEVICE,
	IOMMUFD_OBJ_VEVENTQ,
#ifdef CONFIG_IOMMUFD_TEST
	IOMMUFD_OBJ_SELFTEST,
#endif
	IOMMUFD_OBJ_MAX,
};

46 /* Base struct for all objects with a userspace ID handle. */
47 struct iommufd_object {
48 	refcount_t shortterm_users;
49 	refcount_t users;
50 	enum iommufd_object_type type;
51 	unsigned int id;
52 };
53 
54 struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
55 					   struct device *dev, u32 *id);
56 void iommufd_device_unbind(struct iommufd_device *idev);
57 
58 int iommufd_device_attach(struct iommufd_device *idev, ioasid_t pasid,
59 			  u32 *pt_id);
60 int iommufd_device_replace(struct iommufd_device *idev, ioasid_t pasid,
61 			   u32 *pt_id);
62 void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid);
63 
64 struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
65 u32 iommufd_device_to_id(struct iommufd_device *idev);
66 
67 struct iommufd_access_ops {
68 	u8 needs_pin_pages : 1;
69 	void (*unmap)(void *data, unsigned long iova, unsigned long length);
70 };
71 
/* Flags for iommufd_access_rw() */
enum {
	IOMMUFD_ACCESS_RW_READ = 0,
	IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
	/* Set if the caller is in a kthread then rw will use kthread_use_mm() */
	IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,

	/* Only for use by selftest */
	__IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
};

82 struct iommufd_access *
83 iommufd_access_create(struct iommufd_ctx *ictx,
84 		      const struct iommufd_access_ops *ops, void *data, u32 *id);
85 void iommufd_access_destroy(struct iommufd_access *access);
86 int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
87 int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
88 void iommufd_access_detach(struct iommufd_access *access);
89 
90 void iommufd_ctx_get(struct iommufd_ctx *ictx);
91 
92 struct iommufd_viommu {
93 	struct iommufd_object obj;
94 	struct iommufd_ctx *ictx;
95 	struct iommu_device *iommu_dev;
96 	struct iommufd_hwpt_paging *hwpt;
97 
98 	const struct iommufd_viommu_ops *ops;
99 
100 	struct xarray vdevs;
101 	struct list_head veventqs;
102 	struct rw_semaphore veventqs_rwsem;
103 
104 	unsigned int type;
105 };
106 
107 /**
108  * struct iommufd_viommu_ops - vIOMMU specific operations
109  * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
110  *           of the vIOMMU will be free-ed by iommufd core after calling this op
111  * @alloc_domain_nested: Allocate a IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
112  *                       nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data
113  *                       must be defined in include/uapi/linux/iommufd.h.
114  *                       It must fully initialize the new iommu_domain before
115  *                       returning. Upon failure, ERR_PTR must be returned.
116  * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
117  *                    any IOMMU hardware specific cache: TLB and device cache.
118  *                    The @array passes in the cache invalidation requests, in
119  *                    form of a driver data structure. A driver must update the
120  *                    array->entry_num to report the number of handled requests.
121  *                    The data structure of the array entry must be defined in
122  *                    include/uapi/linux/iommufd.h
123  */
124 struct iommufd_viommu_ops {
125 	void (*destroy)(struct iommufd_viommu *viommu);
126 	struct iommu_domain *(*alloc_domain_nested)(
127 		struct iommufd_viommu *viommu, u32 flags,
128 		const struct iommu_user_data *user_data);
129 	int (*cache_invalidate)(struct iommufd_viommu *viommu,
130 				struct iommu_user_data_array *array);
131 };
132 
133 #if IS_ENABLED(CONFIG_IOMMUFD)
134 struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
135 struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
136 void iommufd_ctx_put(struct iommufd_ctx *ictx);
137 bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
138 
139 int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
140 			     unsigned long length, struct page **out_pages,
141 			     unsigned int flags);
142 void iommufd_access_unpin_pages(struct iommufd_access *access,
143 				unsigned long iova, unsigned long length);
144 int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
145 		      void *data, size_t len, unsigned int flags);
146 int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
147 int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
148 int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
149 #else /* !CONFIG_IOMMUFD */
iommufd_ctx_from_file(struct file * file)150 static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
151 {
152 	return ERR_PTR(-EOPNOTSUPP);
153 }
154 
/* No-op when iommufd is not built in */
static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
{
}

static inline int iommufd_access_pin_pages(struct iommufd_access *access,
					   unsigned long iova,
					   unsigned long length,
					   struct page **out_pages,
					   unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
					      unsigned long iova,
					      unsigned long length)
{
}

static inline int iommufd_access_rw(struct iommufd_access *access,
				    unsigned long iova, void *data, size_t len,
				    unsigned int flags)
{
	return -EOPNOTSUPP;
}

static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}

static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
{
	return -EOPNOTSUPP;
}
189 #endif /* CONFIG_IOMMUFD */
190 
191 #if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
192 struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
193 					     size_t size,
194 					     enum iommufd_object_type type);
195 struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
196 				       unsigned long vdev_id);
197 int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
198 			       struct device *dev, unsigned long *vdev_id);
199 int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
200 				enum iommu_veventq_type type, void *event_data,
201 				size_t data_len);
202 #else /* !CONFIG_IOMMUFD_DRIVER_CORE */
203 static inline struct iommufd_object *
_iommufd_object_alloc(struct iommufd_ctx * ictx,size_t size,enum iommufd_object_type type)204 _iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
205 		      enum iommufd_object_type type)
206 {
207 	return ERR_PTR(-EOPNOTSUPP);
208 }
209 
210 static inline struct device *
iommufd_viommu_find_dev(struct iommufd_viommu * viommu,unsigned long vdev_id)211 iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
212 {
213 	return NULL;
214 }
215 
static inline int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
					     struct device *dev,
					     unsigned long *vdev_id)
{
	/* -ENOENT: no vdev can exist without the driver core */
	return -ENOENT;
}

iommufd_viommu_report_event(struct iommufd_viommu * viommu,enum iommu_veventq_type type,void * event_data,size_t data_len)223 static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
224 					      enum iommu_veventq_type type,
225 					      void *event_data, size_t data_len)
226 {
227 	return -EOPNOTSUPP;
228 }
229 #endif /* CONFIG_IOMMUFD_DRIVER_CORE */
230 
231 /*
232  * Helpers for IOMMU driver to allocate driver structures that will be freed by
233  * the iommufd core. The free op will be called prior to freeing the memory.
234  */
235 #define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops)             \
236 	({                                                                     \
237 		drv_struct *ret;                                               \
238 									       \
239 		static_assert(__same_type(struct iommufd_viommu,               \
240 					  ((drv_struct *)NULL)->member));      \
241 		static_assert(offsetof(drv_struct, member.obj) == 0);          \
242 		ret = (drv_struct *)_iommufd_object_alloc(                     \
243 			ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU);         \
244 		if (!IS_ERR(ret))                                              \
245 			ret->member.ops = viommu_ops;                          \
246 		ret;                                                           \
247 	})
248 #endif
249