/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
24
25 #ifndef _TTM_DEVICE_H_
26 #define _TTM_DEVICE_H_
27
28 #include <linux/types.h>
29 #include <linux/workqueue.h>
30 #include <drm/ttm/ttm_allocation.h>
31 #include <drm/ttm/ttm_resource.h>
32 #include <drm/ttm/ttm_pool.h>
33
/* Forward declarations; only pointers to these types are used below. */
struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
struct ttm_operation_ctx;
38
/**
 * struct ttm_global - Buffer object driver global data.
 *
 * State shared by all TTM devices in the system; a single instance is
 * exported as &ttm_glob.
 */
extern struct ttm_global {

	/**
	 * @dummy_read_page: Pointer to a dummy page used for mapping requests
	 * of unpopulated pages. Constant after init.
	 */
	struct page *dummy_read_page;

	/**
	 * @device_list: List of buffer object devices. Protected by
	 * ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * @bo_count: Number of buffer objects allocated by devices.
	 */
	atomic_t bo_count;
} ttm_glob;
61
/**
 * struct ttm_device_funcs - Driver callback table for a TTM device.
 *
 * Drivers fill this in and pass it to ttm_device_init(); TTM core calls
 * back into the driver through these hooks.
 */
struct ttm_device_funcs {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 * @ctx: Context for this operation.
	 *
	 * Allocate all backing pages.
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: The struct ttm_tt containing the backing pages.
	 *
	 * Free all backing pages.
	 */
	void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
				  struct ttm_tt *ttm);

	/**
	 * ttm_tt_destroy
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This is called back from ttm_tt_destroy(),
	 * so do not call ttm_tt_destroy() from this callback or it will
	 * recurse forever.
	 */
	void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags.
	 * This should not cause multihop evictions, and the core will warn
	 * if one is proposed.
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 * @hop: placement for driver directed intermediate hop
	 *
	 * Move a buffer between two memory regions.
	 * Returns errno -EMULTIHOP if driver requests a hop.
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop);

	/**
	 * Hook to notify driver about a resource delete.
	 */
	void (*delete_mem_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to swap out this bo.
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not used anymore. io_mem_reserve & io_mem_free
	 * calls are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_device *bdev,
			      struct ttm_resource *mem);
	void (*io_mem_free)(struct ttm_device *bdev,
			    struct ttm_resource *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete.
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};
212
/**
 * struct ttm_device - Buffer object driver device-specific data.
 */
struct ttm_device {
	/**
	 * @device_list: Our entry in the global device list.
	 * Constant after bo device init.
	 */
	struct list_head device_list;

	/**
	 * @alloc_flags: TTM_ALLOCATION_* flags.
	 */
	unsigned int alloc_flags;

	/**
	 * @funcs: Function table for the device.
	 * Constant after bo device init.
	 */
	const struct ttm_device_funcs *funcs;

	/**
	 * @sysman: Resource manager for the system domain.
	 * Access via ttm_manager_type.
	 */
	struct ttm_resource_manager sysman;

	/**
	 * @man_drv: An array of resource_managers, one per resource type.
	 */
	struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];

	/**
	 * @vma_manager: Address space manager for finding BOs to mmap.
	 */
	struct drm_vma_offset_manager *vma_manager;

	/**
	 * @pool: page pool for the device.
	 */
	struct ttm_pool pool;

	/**
	 * @lru_lock: Protection for the per manager LRU and ddestroy lists.
	 */
	spinlock_t lru_lock;

	/**
	 * @unevictable: Buffer objects which are pinned or swapped and as such
	 * not on an LRU list.
	 */
	struct list_head unevictable;

	/**
	 * @dev_mapping: A pointer to the struct address_space for invalidating
	 * CPU mappings on buffer move. Protected by load/unload sync.
	 */
	struct address_space *dev_mapping;

	/**
	 * @wq: Work queue structure for the delayed delete workqueue.
	 */
	struct workqueue_struct *wq;
};
277
/*
 * Swap-out entry points, system wide and per device. @gfp_flags constrains
 * the allocations performed while swapping out. NOTE(review): exact return
 * semantics are defined by the implementations in ttm_device.c — confirm
 * there before relying on them.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags);
/* Presumably readies the device's buffers for hibernation — see ttm_device.c. */
int ttm_device_prepare_hibernation(struct ttm_device *bdev);
282
283 static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device * bdev,int mem_type)284 ttm_manager_type(struct ttm_device *bdev, int mem_type)
285 {
286 BUILD_BUG_ON(__builtin_constant_p(mem_type)
287 && mem_type >= TTM_NUM_MEM_TYPES);
288 return bdev->man_drv[mem_type];
289 }
290
ttm_set_driver_manager(struct ttm_device * bdev,int type,struct ttm_resource_manager * manager)291 static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
292 struct ttm_resource_manager *manager)
293 {
294 BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
295 bdev->man_drv[type] = manager;
296 }
297
/*
 * Device lifecycle: pair each successful ttm_device_init() with a
 * ttm_device_fini(). @alloc_flags takes the TTM_ALLOCATION_* flags stored
 * in &struct ttm_device.alloc_flags.
 */
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    unsigned int alloc_flags);
void ttm_device_fini(struct ttm_device *bdev);
void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
304
305 #endif
306