/* SPDX-License-Identifier: MIT */
#ifndef _DRM_PAGEMAP_H_
#define _DRM_PAGEMAP_H_

#include <linux/dma-direction.h>
#include <linux/hmm.h>
#include <linux/types.h>

struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;

/**
 * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
 *
 * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
 * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
 */
enum drm_interconnect_protocol {
	DRM_INTERCONNECT_SYSTEM,
	DRM_INTERCONNECT_DRIVER,
	/* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
};
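
/*
 * Illustrative sketch only: a driver could define a private interconnect
 * protocol value on top of DRM_INTERCONNECT_DRIVER. The FOO_INTERCONNECT_FAST
 * name below is hypothetical and not part of this header.
 *
 *	#define FOO_INTERCONNECT_FAST	(DRM_INTERCONNECT_DRIVER + 1)
 */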

/**
 * struct drm_pagemap_device_addr - Device address representation.
 * @addr: The dma address or driver-defined address for driver private interconnects.
 * @proto: The interconnect protocol.
 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction.
 *
 * Note: There is room for improvement here. We should be able to pack into
 * 64 bits.
 */
struct drm_pagemap_device_addr {
	dma_addr_t addr;
	u64 proto : 54;
	u64 order : 8;
	u64 dir : 2;
};

/**
 * drm_pagemap_device_addr_encode() - Encode a dma address with metadata
 * @addr: The dma address or driver-defined address for driver private interconnects.
 * @proto: The interconnect protocol.
 * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
 * @dir: The DMA direction.
 *
 * Return: A struct drm_pagemap_device_addr encoding the above information.
 */
static inline struct drm_pagemap_device_addr
drm_pagemap_device_addr_encode(dma_addr_t addr,
			       enum drm_interconnect_protocol proto,
			       unsigned int order,
			       enum dma_data_direction dir)
{
	return (struct drm_pagemap_device_addr) {
		.addr = addr,
		.proto = proto,
		.order = order,
		.dir = dir,
	};
}
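
/*
 * Illustrative sketch only: a @device_map implementation for the system
 * interconnect might DMA-map the page and encode the result. The
 * foo_device_map() name is hypothetical; dma_map_page() and
 * drm_pagemap_device_addr_encode() are the only calls assumed here.
 *
 *	static struct drm_pagemap_device_addr
 *	foo_device_map(struct drm_pagemap *dpagemap, struct device *dev,
 *		       struct page *page, unsigned int order,
 *		       enum dma_data_direction dir)
 *	{
 *		dma_addr_t addr = dma_map_page(dev, page, 0,
 *					       PAGE_SIZE << order, dir);
 *
 *		return drm_pagemap_device_addr_encode(addr,
 *						      DRM_INTERCONNECT_SYSTEM,
 *						      order, dir);
 *	}
 */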

/**
 * struct drm_pagemap_ops - Ops for a drm-pagemap.
 */
struct drm_pagemap_ops {
	/**
	 * @device_map: Map for device access, or provide a driver-defined
	 * address suitable for a driver-private interconnect.
	 *
	 * @dpagemap: The struct drm_pagemap for the page.
	 * @dev: The device mapper.
	 * @page: The page to map.
	 * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
	 * @dir: The transfer direction.
	 */
	struct drm_pagemap_device_addr (*device_map)(struct drm_pagemap *dpagemap,
						     struct device *dev,
						     struct page *page,
						     unsigned int order,
						     enum dma_data_direction dir);

	/**
	 * @device_unmap: Unmap a device address previously obtained using @device_map.
	 *
	 * @dpagemap: The struct drm_pagemap for the mapping.
	 * @dev: The device unmapper.
	 * @addr: The device address obtained when mapping.
	 */
	void (*device_unmap)(struct drm_pagemap *dpagemap,
			     struct device *dev,
			     struct drm_pagemap_device_addr addr);

	/**
	 * @populate_mm: Populate part of the mm with @dpagemap memory,
	 * migrating existing data.
	 * @dpagemap: The struct drm_pagemap managing the memory.
	 * @start: The virtual start address in @mm.
	 * @end: The virtual end address in @mm.
	 * @mm: Pointer to a live mm. The caller must have an mmget()
	 * reference.
	 * @timeslice_ms: The time requested for the migrated pages to
	 * be present in @mm before migration back is allowed.
	 *
	 * The caller will have the mm lock at least in read mode.
	 * Note that there is no guarantee that the memory is resident
	 * after the function returns; it's best effort only.
	 * When the mm is not using the memory anymore, it will be
	 * released. The struct drm_pagemap might have a mechanism in
	 * place to reclaim the memory, in which case the data will be
	 * migrated, typically to system memory.
	 * The implementation should hold sufficient runtime power
	 * references while pages are used in an address space, and
	 * should ideally guard against hardware device unbind in such
	 * a way that device pages are migrated back to system memory
	 * before the device pages are removed. The implementation
	 * should return -ENODEV after device removal.
	 *
	 * Return: 0 if successful. Negative error code on error.
	 */
	int (*populate_mm)(struct drm_pagemap *dpagemap,
			   unsigned long start, unsigned long end,
			   struct mm_struct *mm,
			   unsigned long timeslice_ms);
};
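
/*
 * Illustrative sketch only: a driver would typically provide a constant ops
 * table. The foo_* names are hypothetical and stand in for driver code.
 *
 *	static const struct drm_pagemap_ops foo_pagemap_ops = {
 *		.device_map = foo_device_map,
 *		.device_unmap = foo_device_unmap,
 *		.populate_mm = foo_populate_mm,
 *	};
 */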

/**
 * struct drm_pagemap - Additional information for a struct dev_pagemap
 * used for device p2p handshaking.
 * @ops: The struct drm_pagemap_ops.
 * @dev: The struct device owning the device-private memory.
 */
struct drm_pagemap {
	const struct drm_pagemap_ops *ops;
	struct device *dev;
};
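
/*
 * Illustrative sketch only: a driver embeds a struct drm_pagemap next to its
 * struct dev_pagemap and points it at an ops table like the one above. The
 * foo_pagemap type and foo_pagemap_ops are hypothetical.
 *
 *	struct foo_pagemap {
 *		struct dev_pagemap pagemap;
 *		struct drm_pagemap dpagemap;
 *	};
 *
 *	fpagemap->dpagemap.ops = &foo_pagemap_ops;
 *	fpagemap->dpagemap.dev = drm_dev->dev;
 */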

struct drm_pagemap_devmem;

/**
 * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM)
 * device memory. These operations are provided by the GPU driver to manage
 * device memory allocations and to migrate data between device memory and
 * system RAM.
 */
struct drm_pagemap_devmem_ops {
	/**
	 * @devmem_release: Release device memory allocation (optional)
	 * @devmem_allocation: device memory allocation
	 *
	 * Release device memory allocation and drop a reference to device
	 * memory allocation.
	 */
	void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);

	/**
	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
	 * @devmem_allocation: device memory allocation
	 * @npages: Number of pages to populate
	 * @pfn: Array of page frame numbers to populate
	 *
	 * Populate device memory page frame numbers (PFN).
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
				   unsigned long npages, unsigned long *pfn);

	/**
	 * @copy_to_devmem: Copy to device memory (required for migration)
	 * @pages: Pointer to array of device memory pages (destination)
	 * @dma_addr: Pointer to array of DMA addresses (source)
	 * @npages: Number of pages to copy
	 *
	 * Copy pages to device memory.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_devmem)(struct page **pages,
			      dma_addr_t *dma_addr,
			      unsigned long npages);

	/**
	 * @copy_to_ram: Copy to system RAM (required for migration)
	 * @pages: Pointer to array of device memory pages (source)
	 * @dma_addr: Pointer to array of DMA addresses (destination)
	 * @npages: Number of pages to copy
	 *
	 * Copy pages to system RAM.
	 *
	 * Return: 0 on success, a negative error code on failure.
	 */
	int (*copy_to_ram)(struct page **pages,
			   dma_addr_t *dma_addr,
			   unsigned long npages);
};
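
/*
 * Illustrative sketch only: a driver wires its migration helpers into a
 * constant ops table. The foo_* names are hypothetical.
 *
 *	static const struct drm_pagemap_devmem_ops foo_devmem_ops = {
 *		.devmem_release = foo_devmem_release,
 *		.populate_devmem_pfn = foo_populate_devmem_pfn,
 *		.copy_to_devmem = foo_copy_to_devmem,
 *		.copy_to_ram = foo_copy_to_ram,
 *	};
 */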

/**
 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
 *
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @detached: Completion, signaled when the device memory allocation is detached
 * from device pages
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
 * @size: Size of device memory allocation
 * @timeslice_expiration: Timeslice expiration in jiffies
 */
struct drm_pagemap_devmem {
	struct device *dev;
	struct mm_struct *mm;
	struct completion detached;
	const struct drm_pagemap_devmem_ops *ops;
	struct drm_pagemap *dpagemap;
	size_t size;
	u64 timeslice_expiration;
};

int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  unsigned long timeslice_ms,
				  void *pgmap_owner);

int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);

const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);

struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);

void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size);
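
/*
 * Illustrative sketch only: a typical migration path initializes the devmem
 * descriptor and then migrates a VA range into it; eviction goes the other
 * way. foo_alloc and its fields are hypothetical driver state.
 *
 *	drm_pagemap_devmem_init(&foo_alloc->devmem, drm_dev->dev, mm,
 *				&foo_devmem_ops, &fpagemap->dpagemap, size);
 *	err = drm_pagemap_migrate_to_devmem(&foo_alloc->devmem, mm,
 *					    start, end, timeslice_ms,
 *					    foo_pgmap_owner);
 *
 *	...
 *
 *	err = drm_pagemap_evict_to_ram(&foo_alloc->devmem);
 */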

int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms);
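
/*
 * Illustrative sketch only (assumed calling convention, mirroring the
 * @populate_mm kerneldoc above): ensure the mm stays alive across the call.
 *
 *	if (mmget_not_zero(mm)) {
 *		err = drm_pagemap_populate_mm(dpagemap, start, end, mm,
 *					      timeslice_ms);
 *		mmput(mm);
 *	}
 */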

#endif