/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>
#include <uapi/linux/iommufd.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
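
/*
 * Example (illustrative only): an IOMMU-API user composes the flags above
 * into the @prot argument of iommu_map(), e.g. a privileged, coherent,
 * read/write mapping:
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE | IOMMU_PRIV;
 */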

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_dma_msi_cookie;
struct iommu_fault_param;
struct iommufd_ctx;
struct iommufd_hw_pagetable;
struct iommufd_viommu;
struct msi_desc;
struct msi_msg;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types, can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by handler provider to hook the group on its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

enum iommu_domain_cookie_type {
	IOMMU_COOKIE_NONE,
	IOMMU_COOKIE_DMA_IOVA,
	IOMMU_COOKIE_DMA_MSI,
	IOMMU_COOKIE_FAULT_HANDLER,
	IOMMU_COOKIE_SVA,
	IOMMU_COOKIE_IOMMUFD,
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	enum iommu_domain_cookie_type cookie_type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	int (*iopf_handler)(struct iopf_group *group);

	union { /* cookie */
		struct iommu_dma_cookie *iova_cookie;
		struct iommu_dma_msi_cookie *msi_cookie;
		struct iommufd_hw_pagetable *iommufd_hwpt;
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

#define IOMMU_NO_PASID			(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID		(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR	(1 << 0)

/*
 * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this
 * list using iommu_pages_list_add(). Note: ONLY pages from
 * iommu_alloc_pages_node_sz() can be used this way!
 */
struct iommu_pages_list {
	struct list_head pages;
};

#define IOMMU_PAGES_LIST_INIT(name) \
	((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) })
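
/*
 * Example (sketch only; iommu_alloc_pages_node_sz() and
 * iommu_pages_list_add() are driver-internal helpers declared in
 * drivers/iommu/iommu-pages.h, shown here purely for illustration):
 *
 *	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 *	void *table = iommu_alloc_pages_node_sz(nid, GFP_KERNEL, SZ_4K);
 *
 *	if (table)
 *		iommu_pages_list_add(&freelist, table);
 */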

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct iommu_pages_list	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears it from IO
 *                        pagetables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data is described by a uAPI structure defined in
 * include/uapi/linux/iommufd.h; @type, @uptr and @len should be copied
 * straight from the corresponding iommufd core uAPI structure.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure that @kdst points to in
 *            the initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
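
/*
 * Usage sketch with hypothetical driver names (the struct/type pair would be
 * the driver's own uAPI definitions from include/uapi/linux/iommufd.h, and
 * "last_member" the final field of the first published version of the
 * struct):
 *
 *	struct iommu_hwpt_mydrv_s1 data;
 *	int rc;
 *
 *	rc = iommu_copy_struct_from_user(&data, user_data,
 *					 IOMMU_HWPT_DATA_MYDRV_S1,
 *					 last_member);
 *	if (rc)
 *		return ERR_PTR(rc);
 */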

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure that @kdst points to in
 *            the initial version.
 *
 * Copy a single entry from a user array. Return 0 for success, otherwise
 * -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                            \
	__iommu_copy_struct_from_user_array(                                   \
		kdst, user_array, data_type, index, sizeof(*(kdst)),           \
		offsetofend(typeof(*(kdst)), min_last))

/**
 * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
 *                                          space data from an
 *                                          iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @kdst_entry_size: sizeof(*kdst)
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 *
 * Copy the entire user array. kdst must have room for kdst_entry_size *
 * user_array->entry_num bytes. Return 0 for success, otherwise -error.
 */
static inline int
iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
				       struct iommu_user_data_array *user_array,
				       unsigned int data_type)
{
	unsigned int i;
	int ret;

	if (user_array->type != data_type)
		return -EINVAL;
	if (!user_array->entry_num)
		return -EINVAL;
	if (likely(user_array->entry_len == kdst_entry_size)) {
		if (copy_from_user(kdst, user_array->uptr,
				   user_array->entry_num *
					   user_array->entry_len))
			return -EFAULT;
		/* Fast path done; no need to also copy item by item */
		return 0;
	}

	/* Copy item by item */
	for (i = 0; i != user_array->entry_num; i++) {
		ret = copy_struct_from_user(
			kdst + kdst_entry_size * i, kdst_entry_size,
			user_array->uptr + user_array->entry_len * i,
			user_array->entry_len);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * __iommu_copy_struct_to_user - Report iommu driver specific user space data
 * @dst_data: Pointer to a struct iommu_user_data for user space data location
 * @src_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @data_type: The data type of the @src_data. Must match with @dst_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int
__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
			    void *src_data, unsigned int data_type,
			    size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (dst_data->type != data_type)
		return -EINVAL;
	if (dst_data->len < min_len || data_len < dst_data->len)
		return -EINVAL;
	return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
				   data_len, NULL);
}

/**
 * iommu_copy_struct_to_user - Report iommu driver specific user space data
 * @user_data: Pointer to a struct iommu_user_data for user space data location
 * @ksrc: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @data_type: The data type of the @ksrc. Must match with @user_data->type
 * @min_last: The last member of the data structure that @ksrc points to in
 *            the initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last)        \
	__iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
				    offsetofend(typeof(*ksrc), min_last))

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by
 *           this op is allocated in the iommu driver and freed by the caller
 *           after use. @type can input a requested type and output a supported
 *           type. The driver should reject an unsupported data @type input
 * @domain_alloc: Do not use in new drivers
 * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
 *                         use identity_domain instead. This should only be used
 *                         if dynamic logic is necessary.
 * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
 *                             input parameters as defined in
 *                             include/uapi/linux/iommufd.h. The @user_data can
 *                             be optionally provided, the new domain must
 *                             support __IOMMU_DOMAIN_PAGING. Upon failure,
 *                             ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
 *                       same as invoking domain_alloc_paging_flags() with
 *                       @flags=0, @user_data=NULL. A driver should implement
 *                       only one of the two ops.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @get_viommu_size: Get the size of the driver-level vIOMMU structure for a
 *                   given @dev corresponding to @viommu_type. The driver should
 *                   return 0 if the vIOMMU type isn't supported. The driver is
 *                   required to use the VIOMMU_STRUCT_SIZE macro so that the
 *                   driver-level vIOMMU structure is sized consistently with
 *                   the core one
 * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
 *               IOMMU instance @viommu->iommu_dev, as the set of virtualization
 *               resources shared/passed to user space IOMMU instance. Associate
 *               it with a nesting @parent_domain. The driver is required to set
 *               @viommu->ops pointing to its own viommu_ops
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @release_domain: If not NULL, attached to a device around @release_device
 *                  to quiesce its DMA.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length,
			 enum iommu_hw_info_type *type);

	/* Domain allocation and freeing by the iommu driver */
#if IS_ENABLED(CONFIG_FSL_PAMU)
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
#endif
	struct iommu_domain *(*domain_alloc_identity)(struct device *dev);
	struct iommu_domain *(*domain_alloc_paging_flags)(
		struct device *dev, u32 flags,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);
	struct iommu_domain *(*domain_alloc_nested)(
		struct device *dev, struct iommu_domain *parent, u32 flags,
		const struct iommu_user_data *user_data);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	size_t (*get_viommu_size)(struct device *dev,
				  enum iommu_viommu_type viommu_type);
	int (*viommu_init)(struct iommufd_viommu *viommu,
			   struct iommu_domain *parent_domain,
			   const struct iommu_user_data *user_data);

	const struct iommu_domain_ops *default_domain_ops;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};
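
/*
 * Sketch of a minimal registration with hypothetical driver callbacks; the
 * exact set of ops a driver must provide depends on its features:
 *
 *	static const struct iommu_ops mydrv_iommu_ops = {
 *		.capable		= mydrv_capable,
 *		.domain_alloc_paging	= mydrv_domain_alloc_paging,
 *		.probe_device		= mydrv_probe_device,
 *		.release_device		= mydrv_release_device,
 *		.device_group		= generic_device_group,
 *		.owner			= THIS_MODULE,
 *		.identity_domain	= &mydrv_identity_domain,
 *		.default_domain_ops	= &mydrv_domain_ops,
 *	};
 */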

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set or replace an iommu domain on a PASID of the device.
 *                 On error, the PASID of the device should be left in its
 *                 old configuration.
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
			  struct iommu_domain *old);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iommu_domain *old);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *                       instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle identifying this iommu instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 * @ready: set once iommu_device_register() has completed successfully
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
	bool ready;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: protects this structure
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec: IOMMU fwspec data
 * @iommu_dev: IOMMU device this device is linked to
 * @priv: IOMMU Driver private data
 * @max_pasids: number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu *fault_param;
	struct iommu_fwspec *fwspec;
	struct iommu_device *iommu_dev;
	void *priv;
	u32 max_pasids;
	u32 attach_deferred:1;
	u32 pci_32bit_workaround:1;
	u32 require_direct:1;
	u32 shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from within the iommu_ops callbacks
 * to retrieve the iommu_device for a device; the core code guarantees it will
 * not invoke an op without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
	};
}

extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return iommu_paging_domain_alloc_flags(dev, 0);
}
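
/*
 * Typical IOMMU-API usage (sketch, error handling abbreviated):
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	if (iommu_attach_device(domain, dev))
 *		goto out_free;
 *	iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
 *		  GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, size);
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */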
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
struct iommu_domain *iommu_driver_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
		   size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync &&
	    likely(iotlb_gather->start < iotlb_gather->end))
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
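
/*
 * Typical flow (sketch): a gather is initialised, ranges are accumulated
 * while unmapping, and one sync flushes the whole batch:
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	while (more_to_unmap)
 *		iommu_iotlb_gather_add_page(domain, &gather, iova, pgsize);
 *	iommu_iotlb_sync(domain, &gather);
 */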

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
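
/*
 * Sketch of a driver's ->read_and_clear_dirty() using the helper above;
 * for_each_pte() and pte_test_and_clear_dirty() are hypothetical stand-ins
 * for the driver's page-table walk:
 *
 *	for_each_pte(domain, iova, size, pte) {
 *		if (!pte_test_and_clear_dirty(pte))
 *			continue;
 *		iommu_dirty_bitmap_record(dirty, pte_iova, pte_size);
 *	}
 *
 * With IOMMU_DIRTY_NO_CLEAR set in @flags, the driver only reads the dirty
 * bits and must leave the PTEs unmodified.
 */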

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
/* CANWBS is supported */
#define IOMMU_FWSPEC_PCI_RC_CANWBS		(1 << 1)

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group during the time the
 * domain is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain		*domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 * @handle: attach handle for this bond
 * @dev: the device bound to the mm
 * @users: reference count of shares of this bond
 */
struct iommu_sva {
	struct iommu_attach_handle	handle;
	struct device			*dev;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct mm_struct	*mm;
	struct list_head	sva_domains;
	struct list_head	mm_list_elm;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);

/* PCI device reset functions */
int pci_dev_reset_iommu_prepare(struct pci_dev *pdev);
void pci_dev_reset_iommu_done(struct pci_dev *pdev);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
								   unsigned int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}
1441
iommu_device_link(struct device * dev,struct device * link)1442 static inline int iommu_device_link(struct device *dev, struct device *link)
1443 {
1444 return -EINVAL;
1445 }
1446
iommu_device_unlink(struct device * dev,struct device * link)1447 static inline void iommu_device_unlink(struct device *dev, struct device *link)
1448 {
1449 }
1450
iommu_fwspec_init(struct device * dev,struct fwnode_handle * iommu_fwnode)1451 static inline int iommu_fwspec_init(struct device *dev,
1452 struct fwnode_handle *iommu_fwnode)
1453 {
1454 return -ENODEV;
1455 }
1456
iommu_fwspec_add_ids(struct device * dev,u32 * ids,int num_ids)1457 static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
1458 int num_ids)
1459 {
1460 return -ENODEV;
1461 }
1462
dev_iommu_fwspec_get(struct device * dev)1463 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1464 {
1465 return NULL;
1466 }
1467
iommu_device_use_default_domain(struct device * dev)1468 static inline int iommu_device_use_default_domain(struct device *dev)
1469 {
1470 return 0;
1471 }
1472
iommu_device_unuse_default_domain(struct device * dev)1473 static inline void iommu_device_unuse_default_domain(struct device *dev)
1474 {
1475 }
1476
1477 static inline int
iommu_group_claim_dma_owner(struct iommu_group * group,void * owner)1478 iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
1479 {
1480 return -ENODEV;
1481 }
1482
iommu_group_release_dma_owner(struct iommu_group * group)1483 static inline void iommu_group_release_dma_owner(struct iommu_group *group)
1484 {
1485 }
1486
iommu_group_dma_owner_claimed(struct iommu_group * group)1487 static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
1488 {
1489 return false;
1490 }
1491
iommu_device_release_dma_owner(struct device * dev)1492 static inline void iommu_device_release_dma_owner(struct device *dev)
1493 {
1494 }
1495
iommu_device_claim_dma_owner(struct device * dev,void * owner)1496 static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
1497 {
1498 return -ENODEV;
1499 }
1500
iommu_attach_device_pasid(struct iommu_domain * domain,struct device * dev,ioasid_t pasid,struct iommu_attach_handle * handle)1501 static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
1502 struct device *dev, ioasid_t pasid,
1503 struct iommu_attach_handle *handle)
1504 {
1505 return -ENODEV;
1506 }
1507
iommu_detach_device_pasid(struct iommu_domain * domain,struct device * dev,ioasid_t pasid)1508 static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
1509 struct device *dev, ioasid_t pasid)
1510 {
1511 }
1512
iommu_alloc_global_pasid(struct device * dev)1513 static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
1514 {
1515 return IOMMU_PASID_INVALID;
1516 }
1517
iommu_free_global_pasid(ioasid_t pasid)1518 static inline void iommu_free_global_pasid(ioasid_t pasid) {}
1519
pci_dev_reset_iommu_prepare(struct pci_dev * pdev)1520 static inline int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
1521 {
1522 return 0;
1523 }
1524
pci_dev_reset_iommu_done(struct pci_dev * pdev)1525 static inline void pci_dev_reset_iommu_done(struct pci_dev *pdev)
1526 {
1527 }
1528 #endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IRQ_MSI_IOMMU
#ifdef CONFIG_IOMMU_API
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
#else
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
					phys_addr_t msi_addr)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */
#endif /* CONFIG_IRQ_MSI_IOMMU */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: the number of bytes mapped on success, or a negative errno
 * on failure.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
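
/*
 * Usage sketch (illustrative only, not part of the kernel API; "dom" and
 * "sgt" are a hypothetical caller's domain and pinned buffer, and the IOVA
 * value is arbitrary):
 *
 *	ssize_t mapped = iommu_map_sgtable(dom, 0x100000, sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 *
 * On success, "mapped" bytes starting at the given IOVA are mapped; on
 * failure a negative errno is returned.
 */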

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
#else /* CONFIG_IOMMU_DMA */
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}
#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU
 * or ARM SMMU, for which the contents of the struct iommu_fwspec are known.
 * Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}
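
/*
 * Usage sketch (illustrative only; "sid", "regs" and MY_SID_REG are
 * hypothetical driver state, not kernel symbols): program the stream ID
 * into a device register, falling back to bypass when the device has no
 * usable fwspec.
 *
 *	u32 sid = TEGRA_STREAM_ID_BYPASS;
 *
 *	if (!tegra_dev_iommu_get_stream_id(dev, &sid))
 *		dev_dbg(dev, "no stream ID, using bypass\n");
 *	writel(sid, regs + MY_SID_REG);
 */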

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that
	 * makes the new mm and the old one point to the same iommu_mm
	 * instance. When either one of the two mms gets released, the
	 * iommu_mm instance is freed, leaving the other mm running into a
	 * use-after-free/double-free problem. To avoid the problem, zeroing
	 * the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}
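
/*
 * Sketch of the dup_mm() behaviour the comment above refers to
 * (simplified; the real code lives in kernel/fork.c):
 *
 *	memcpy(mm, oldmm, sizeof(*mm));	copies oldmm's iommu_mm pointer
 *	mm_pasid_init(mm);		mm->iommu_mm = NULL again
 *
 * Without the mm_pasid_init() call, both mms would eventually release the
 * same iommu_mm instance.
 */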

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {}
#endif /* CONFIG_IOMMU_MM_DATA */
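
/*
 * Usage sketch for the SVA API above (illustrative only; error handling
 * abbreviated, "handle" and "pasid" are the hypothetical caller's locals):
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... submit device work tagged with pasid ...
 *	iommu_sva_unbind_device(handle);
 */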

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	return -ENODEV;
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
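
/*
 * Usage sketch for the IOPF queue API above (illustrative only; the queue
 * name and the teardown ordering shown are a plausible pattern, not a
 * requirement spelled out here):
 *
 *	struct iopf_queue *q = iopf_queue_alloc("my-iopf");
 *
 *	if (!q)
 *		return -ENOMEM;
 *	ret = iopf_queue_add_device(q, dev);
 *	...
 *	iopf_queue_remove_device(q, dev);
 *	iopf_queue_free(q);
 */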
#endif /* __LINUX_IOMMU_H */