/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
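
/*
 * Illustrative sketch (not part of this header): combining the permission
 * flags above into the @prot argument of iommu_map(). The domain, IOVA and
 * page are hypothetical.
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
 *
 *	ret = iommu_map(domain, iova, page_to_phys(page), PAGE_SIZE,
 *			prot, GFP_KERNEL);
 */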

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_dma_msi_cookie;
struct iommu_fault_param;
struct iommufd_ctx;
struct iommufd_viommu;
struct msi_desc;
struct msi_msg;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types; can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};
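
/*
 * Illustrative sketch (not part of this header): a fault handler answering a
 * page request. Whether @pasid is echoed back follows the
 * IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID flag documented above; the response
 * is delivered through the driver's ->page_response() op, defined further
 * down in struct iommu_ops.
 *
 *	struct iommu_page_response resp = {
 *		.grpid = fault->prm.grpid,
 *		.code  = IOMMU_PAGE_RESP_SUCCESS,
 *	};
 *
 *	if (fault->prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)
 *		resp.pasid = fault->prm.pasid;
 */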

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by handler provider to hook the group on its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

enum iommu_domain_cookie_type {
	IOMMU_COOKIE_NONE,
	IOMMU_COOKIE_DMA_IOVA,
	IOMMU_COOKIE_DMA_MSI,
	IOMMU_COOKIE_FAULT_HANDLER,
	IOMMU_COOKIE_SVA,
	IOMMU_COOKIE_IOMMUFD,
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  DMA-API handling. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	enum iommu_domain_cookie_type cookie_type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	int (*iopf_handler)(struct iopf_group *group);

	union { /* cookie */
		struct iommu_dma_cookie *iova_cookie;
		struct iommu_dma_msi_cookie *msi_cookie;
		struct iommufd_hw_pagetable *iommufd_hwpt;
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID			(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID		(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR	(1 << 0)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        pagetables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data is described by a uAPI structure defined in
 * include/uapi/linux/iommufd.h; @type, @uptr and @len should simply be
 * copied from the corresponding iommufd core uAPI structure.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure that @kdst points to, in
 *            its initial version.
 *
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
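
/*
 * Illustrative sketch (not part of this header): a driver's
 * domain_alloc_nested() implementation copying its uAPI data. The struct,
 * type constant and member name (my_iommu_hwpt_data,
 * IOMMU_HWPT_DATA_MY_IOMMU, flags) are hypothetical; real drivers use types
 * from include/uapi/linux/iommufd.h.
 *
 *	struct my_iommu_hwpt_data data;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&data, user_data,
 *					  IOMMU_HWPT_DATA_MY_IOMMU, flags);
 *	if (ret)
 *		return ERR_PTR(ret);
 */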

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure that @kdst points to, in
 *            its initial version.
 *
 * Copy a single entry from a user array. Return 0 for success, otherwise
 * -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                            \
	__iommu_copy_struct_from_user_array(                                   \
		kdst, user_array, data_type, index, sizeof(*(kdst)),           \
		offsetofend(typeof(*(kdst)), min_last))
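
/*
 * Illustrative sketch (not part of this header): a cache_invalidate_user()
 * implementation walking a request array entry by entry. The request struct,
 * type constant and member name (my_iommu_inv_req,
 * IOMMU_HWPT_INVALIDATE_DATA_MY_IOMMU, addr) are hypothetical.
 *
 *	for (i = 0; i < array->entry_num; i++) {
 *		struct my_iommu_inv_req req;
 *
 *		ret = iommu_copy_struct_from_user_array(&req, array,
 *				IOMMU_HWPT_INVALIDATE_DATA_MY_IOMMU, i, addr);
 *		if (ret)
 *			break;
 *		// ... issue the invalidation described by req ...
 *	}
 *	array->entry_num = i;	// report how many requests were handled
 */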

/**
 * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
 *                                          space data from an
 *                                          iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @kdst_entry_size: sizeof(*kdst)
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 *
 * Copy the entire user array. kdst must have room for kdst_entry_size *
 * user_array->entry_num bytes. Return 0 for success, otherwise -error.
 */
static inline int
iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
				       struct iommu_user_data_array *user_array,
				       unsigned int data_type)
{
	unsigned int i;
	int ret;

	if (user_array->type != data_type)
		return -EINVAL;
	if (!user_array->entry_num)
		return -EINVAL;
	if (likely(user_array->entry_len == kdst_entry_size)) {
		if (copy_from_user(kdst, user_array->uptr,
				   user_array->entry_num *
					   user_array->entry_len))
			return -EFAULT;
		return 0;
	}

	/* Copy item by item */
	for (i = 0; i != user_array->entry_num; i++) {
		ret = copy_struct_from_user(
			kdst + kdst_entry_size * i, kdst_entry_size,
			user_array->uptr + user_array->entry_len * i,
			user_array->entry_len);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                NULL. The domain is not fully initialized until the call to
 *                iommu_domain_alloc() returns.
 * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
 *                     input parameters as defined in
 *                     include/uapi/linux/iommufd.h. The @user_data can be
 *                     optionally provided, the new domain must support
 *                     __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
 *                     returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
 *                       same as invoking domain_alloc_paging_flags() with
 *                       @flags=0, @user_data=NULL. A driver should implement
 *                       only one of the two ops.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind
 *                the @dev, as the set of virtualization resources shared/passed
 *                to user space IOMMU instance, and associate it with a nesting
 *                @parent_domain. The @viommu_type must be defined in the header
 *                include/uapi/linux/iommufd.h.
 *                It is required to call the iommufd_viommu_alloc() helper for
 *                a bundled allocation of the core and the driver structures,
 *                using the given @ictx pointer.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @release_domain: If not NULL, a domain that is attached to the device just
 *                  before its release_device() op is called.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_paging_flags)(
		struct device *dev, u32 flags,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);
	struct iommu_domain *(*domain_alloc_nested)(
		struct device *dev, struct iommu_domain *parent, u32 flags,
		const struct iommu_user_data *user_data);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	struct iommufd_viommu *(*viommu_alloc)(
		struct device *dev, struct iommu_domain *parent_domain,
		struct iommufd_ctx *ictx, unsigned int viommu_type);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};
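
/*
 * Illustrative sketch (not part of this header): the minimal shape of a
 * driver's ops table. All my_iommu_* names are hypothetical; only the ops a
 * driver actually implements need to be filled in.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable		= my_iommu_capable,
 *		.domain_alloc_paging	= my_iommu_domain_alloc_paging,
 *		.probe_device		= my_iommu_probe_device,
 *		.release_device		= my_iommu_release_device,
 *		.device_group		= generic_device_group,
 *		.pgsize_bitmap		= SZ_4K | SZ_2M,
 *		.owner			= THIS_MODULE,
 *		.default_domain_ops	= &my_iommu_domain_ops,
 *	};
 */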

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set or replace an iommu domain on a PASID of the device;
 *                 on error the PASID of the device must be left in its old
 *                 configuration.
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iommu_domain *old);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *                       instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu, used for firmware-based lookup
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from within the iommu_ops to
 * retrieve the iommu_device for a device; the core code guarantees it will
 * not invoke an op on a device without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	 container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return iommu_paging_domain_alloc_flags(dev, 0);
}
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);
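
/*
 * Illustrative sketch (not part of this header): the basic IOMMU-API flow of
 * allocating a paging domain, attaching a device, and mapping one page. The
 * device pointer, IOVA and physical address are hypothetical.
 *
 *	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);
 *
 *	if (IS_ERR(domain))
 *		return PTR_ERR(domain);
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free;
 *	if (iommu_map(domain, iova, paddr, SZ_4K,
 *		      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL))
 *		goto err_detach;
 *	// ... DMA happens ...
 *	iommu_unmap(domain, iova, SZ_4K);
 */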

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
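
/*
 * Illustrative sketch (not part of this header): the gather flow as seen from
 * a caller pairing iommu_unmap_fast() with an explicit sync. The driver's
 * unmap_pages() op feeds the gather via iommu_iotlb_gather_add_page() for
 * each page it tears down; domain, iova and size are hypothetical.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */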

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
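
/*
 * Illustrative sketch (not part of this header): the core of a driver's
 * read_and_clear_dirty() op. my_pte_test_and_clear_dirty() and the flat 4K
 * walk are hypothetical; the pattern of honouring IOMMU_DIRTY_NO_CLEAR and
 * recording each dirty range is what the dirty ops above expect.
 *
 *	unsigned long end = iova + size, pgsz = SZ_4K;
 *	bool clear = !(flags & IOMMU_DIRTY_NO_CLEAR);
 *
 *	for (; iova < end; iova += pgsz) {
 *		if (my_pte_test_and_clear_dirty(domain, iova, clear))
 *			iommu_dirty_bitmap_record(dirty, iova, pgsz);
 *	}
 *	return 0;
 */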

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
/* CANWBS is supported */
#define IOMMU_FWSPEC_PCI_RC_CANWBS		(1 << 1)
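
/*
 * Illustrative sketch (not part of this header): a driver's of_xlate() op
 * recording the device ID encoded in the firmware description into the
 * fwspec. The single-cell ID layout and my_iommu_of_xlate name are
 * assumptions of this sketch.
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     const struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */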

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group during the time the
 * domain is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain		*domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 * @handle: attach handle for this bond
 * @dev: the device bound to the mm
 * @users: number of shared users of this bond
 */
struct iommu_sva {
	struct iommu_attach_handle	handle;
	struct device			*dev;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
1146 #else /* CONFIG_IOMMU_API */
1147
1148 struct iommu_ops {};
1149 struct iommu_group {};
1150 struct iommu_fwspec {};
1151 struct iommu_device {};
1152 struct iommu_fault_param {};
1153 struct iommu_iotlb_gather {};
1154 struct iommu_dirty_bitmap {};
1155 struct iommu_dirty_ops {};
1156
device_iommu_capable(struct device * dev,enum iommu_cap cap)1157 static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
1158 {
1159 return false;
1160 }
1161
iommu_paging_domain_alloc_flags(struct device * dev,unsigned int flags)1162 static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
1163 unsigned int flags)
1164 {
1165 return ERR_PTR(-ENODEV);
1166 }
1167
iommu_paging_domain_alloc(struct device * dev)1168 static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
1169 {
1170 return ERR_PTR(-ENODEV);
1171 }
1172
iommu_domain_free(struct iommu_domain * domain)1173 static inline void iommu_domain_free(struct iommu_domain *domain)
1174 {
1175 }
1176
iommu_attach_device(struct iommu_domain * domain,struct device * dev)1177 static inline int iommu_attach_device(struct iommu_domain *domain,
1178 struct device *dev)
1179 {
1180 return -ENODEV;
1181 }
1182
iommu_detach_device(struct iommu_domain * domain,struct device * dev)1183 static inline void iommu_detach_device(struct iommu_domain *domain,
1184 struct device *dev)
1185 {
1186 }
1187
iommu_get_domain_for_dev(struct device * dev)1188 static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1189 {
1190 return NULL;
1191 }
1192
iommu_map(struct iommu_domain * domain,unsigned long iova,phys_addr_t paddr,size_t size,int prot,gfp_t gfp)1193 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
1194 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1195 {
1196 return -ENODEV;
1197 }
1198
iommu_unmap(struct iommu_domain * domain,unsigned long iova,size_t size)1199 static inline size_t iommu_unmap(struct iommu_domain *domain,
1200 unsigned long iova, size_t size)
1201 {
1202 return 0;
1203 }
1204
iommu_unmap_fast(struct iommu_domain * domain,unsigned long iova,int gfp_order,struct iommu_iotlb_gather * iotlb_gather)1205 static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
1206 unsigned long iova, int gfp_order,
1207 struct iommu_iotlb_gather *iotlb_gather)
1208 {
1209 return 0;
1210 }
1211
iommu_map_sg(struct iommu_domain * domain,unsigned long iova,struct scatterlist * sg,unsigned int nents,int prot,gfp_t gfp)1212 static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
1213 unsigned long iova, struct scatterlist *sg,
1214 unsigned int nents, int prot, gfp_t gfp)
1215 {
1216 return -ENODEV;
1217 }
1218
iommu_flush_iotlb_all(struct iommu_domain * domain)1219 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
1220 {
1221 }
1222
iommu_iotlb_sync(struct iommu_domain * domain,struct iommu_iotlb_gather * iotlb_gather)1223 static inline void iommu_iotlb_sync(struct iommu_domain *domain,
1224 struct iommu_iotlb_gather *iotlb_gather)
1225 {
1226 }
1227
iommu_iova_to_phys(struct iommu_domain * domain,dma_addr_t iova)1228 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1229 {
1230 return 0;
1231 }
1232
iommu_set_fault_handler(struct iommu_domain * domain,iommu_fault_handler_t handler,void * token)1233 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
1234 iommu_fault_handler_t handler, void *token)
1235 {
1236 }
1237
iommu_get_resv_regions(struct device * dev,struct list_head * list)1238 static inline void iommu_get_resv_regions(struct device *dev,
1239 struct list_head *list)
1240 {
1241 }
1242
iommu_put_resv_regions(struct device * dev,struct list_head * list)1243 static inline void iommu_put_resv_regions(struct device *dev,
1244 struct list_head *list)
1245 {
1246 }
1247
iommu_get_group_resv_regions(struct iommu_group * group,struct list_head * head)1248 static inline int iommu_get_group_resv_regions(struct iommu_group *group,
1249 struct list_head *head)
1250 {
1251 return -ENODEV;
1252 }
1253
iommu_set_default_passthrough(bool cmd_line)1254 static inline void iommu_set_default_passthrough(bool cmd_line)
1255 {
1256 }
1257
iommu_set_default_translated(bool cmd_line)1258 static inline void iommu_set_default_translated(bool cmd_line)
1259 {
1260 }
1261
iommu_default_passthrough(void)1262 static inline bool iommu_default_passthrough(void)
1263 {
1264 return true;
1265 }
1266
iommu_attach_group(struct iommu_domain * domain,struct iommu_group * group)1267 static inline int iommu_attach_group(struct iommu_domain *domain,
1268 struct iommu_group *group)
1269 {
1270 return -ENODEV;
1271 }
1272
iommu_detach_group(struct iommu_domain * domain,struct iommu_group * group)1273 static inline void iommu_detach_group(struct iommu_domain *domain,
1274 struct iommu_group *group)
1275 {
1276 }
1277
iommu_group_alloc(void)1278 static inline struct iommu_group *iommu_group_alloc(void)
1279 {
1280 return ERR_PTR(-ENODEV);
1281 }
1282
iommu_group_get_iommudata(struct iommu_group * group)1283 static inline void *iommu_group_get_iommudata(struct iommu_group *group)
1284 {
1285 return NULL;
1286 }
1287
iommu_group_set_iommudata(struct iommu_group * group,void * iommu_data,void (* release)(void * iommu_data))1288 static inline void iommu_group_set_iommudata(struct iommu_group *group,
1289 void *iommu_data,
1290 void (*release)(void *iommu_data))
1291 {
1292 }
1293
iommu_group_set_name(struct iommu_group * group,const char * name)1294 static inline int iommu_group_set_name(struct iommu_group *group,
1295 const char *name)
1296 {
1297 return -ENODEV;
1298 }
1299
iommu_group_add_device(struct iommu_group * group,struct device * dev)1300 static inline int iommu_group_add_device(struct iommu_group *group,
1301 struct device *dev)
1302 {
1303 return -ENODEV;
1304 }
1305
iommu_group_remove_device(struct device * dev)1306 static inline void iommu_group_remove_device(struct device *dev)
1307 {
1308 }
1309
iommu_group_for_each_dev(struct iommu_group * group,void * data,int (* fn)(struct device *,void *))1310 static inline int iommu_group_for_each_dev(struct iommu_group *group,
1311 void *data,
1312 int (*fn)(struct device *, void *))
1313 {
1314 return -ENODEV;
1315 }
1316
iommu_group_get(struct device * dev)1317 static inline struct iommu_group *iommu_group_get(struct device *dev)
1318 {
1319 return NULL;
1320 }
1321
iommu_group_put(struct iommu_group * group)1322 static inline void iommu_group_put(struct iommu_group *group)
1323 {
1324 }
1325
iommu_group_id(struct iommu_group * group)1326 static inline int iommu_group_id(struct iommu_group *group)
1327 {
1328 return -ENODEV;
1329 }
1330
iommu_set_pgtable_quirks(struct iommu_domain * domain,unsigned long quirks)1331 static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
1332 unsigned long quirks)
1333 {
1334 return 0;
1335 }
1336
iommu_device_register(struct iommu_device * iommu,const struct iommu_ops * ops,struct device * hwdev)1337 static inline int iommu_device_register(struct iommu_device *iommu,
1338 const struct iommu_ops *ops,
1339 struct device *hwdev)
1340 {
1341 return -ENODEV;
1342 }
1343
dev_to_iommu_device(struct device * dev)1344 static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
1345 {
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	return -ENODEV;
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid,
					    struct iommu_attach_handle *handle)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IRQ_MSI_IOMMU
#ifdef CONFIG_IOMMU_API
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
#else
static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
					phys_addr_t msi_addr)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */
#endif /* CONFIG_IRQ_MSI_IOMMU */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
		unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
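
/*
 * Usage sketch (illustrative only, names are hypothetical): a driver that
 * has built an sg_table for a buffer might map it read/write into an
 * existing paging domain as follows. On success the number of bytes mapped
 * is returned; on failure a negative errno is returned and no partial
 * mapping is left behind.
 *
 *	ssize_t mapped;
 *
 *	mapped = iommu_map_sgtable(domain, buf_iova, sgt,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 */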

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
#else /* CONFIG_IOMMU_DMA */
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}
#endif /* CONFIG_IOMMU_DMA */
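
/*
 * Usage sketch (illustrative only): a caller that runs devices behind its
 * own unmanaged domain (VFIO is one such user) installs an MSI cookie over
 * a reserved IOVA window so MSI doorbells can be mapped later. The names
 * "domain" and "resv_msi_base" are hypothetical; -ENODEV (IOMMU_DMA not
 * built in) is typically not treated as fatal.
 *
 *	ret = iommu_get_msi_cookie(domain, resv_msi_base);
 *	if (ret && ret != -ENODEV)
 *		goto out_detach;
 */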

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}
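
/*
 * Usage sketch (illustrative only): a Tegra platform driver programs the
 * stream ID it gets back, falling back to bypass when the device is not
 * translated by an SMMU. "priv->regs" and HYPOTHETICAL_SID_REG are made-up
 * names standing in for the device-specific register interface.
 *
 *	u32 sid;
 *
 *	if (!tegra_dev_iommu_get_stream_id(dev, &sid))
 *		sid = TEGRA_STREAM_ID_BYPASS;
 *	writel(sid, priv->regs + HYPOTHETICAL_SID_REG);
 */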

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
	 * the new mm and the old one point to the same iommu_mm instance. When either
	 * of the two mms gets released, the iommu_mm instance is freed, leaving the
	 * other mm running into a use-after-free/double-free problem. To avoid the
	 * problem, the iommu_mm pointer of the new mm must be zeroed here.
	 */
	mm->iommu_mm = NULL;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_MM_DATA */
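
/*
 * Usage sketch (illustrative only) for the SVA API above: a driver that
 * wants a device to operate directly on the current process's address
 * space binds the mm, reads back the PASID to program into the device,
 * and unbinds on teardown. Programming the PASID is device specific and
 * elided here.
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	if (pasid == IOMMU_PASID_INVALID) {
 *		iommu_sva_unbind_device(handle);
 *		return -ENODEV;
 *	}
 *	... program pasid into the device ...
 *	iommu_sva_unbind_device(handle);
 */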

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	return -ENODEV;
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
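
/*
 * Usage sketch (illustrative only) for the IOPF queue API above: an IOMMU
 * driver supporting recoverable I/O page faults typically allocates one
 * queue per IOMMU instance, adds faulting devices to it, and tears things
 * down in reverse order. "iommu_dev" and "dev" are hypothetical.
 *
 *	struct iopf_queue *queue;
 *	int ret;
 *
 *	queue = iopf_queue_alloc(dev_name(iommu_dev));
 *	if (!queue)
 *		return -ENOMEM;
 *	ret = iopf_queue_add_device(queue, dev);
 *	if (ret) {
 *		iopf_queue_free(queue);
 *		return ret;
 *	}
 *	...
 *	iopf_queue_remove_device(queue, dev);
 *	iopf_queue_free(queue);
 */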
#endif /* __LINUX_IOMMU_H */