/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _XE_RES_CURSOR_H_
#define _XE_RES_CURSOR_H_

#include <linux/scatterlist.h>

#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
#include "xe_svm.h"
#include "xe_ttm_vram_mgr.h"
/**
 * struct xe_res_cursor - state for walking over dma mapping, vram_mgr,
 * stolen_mgr, and gtt_mgr allocations
 */
struct xe_res_cursor {
	/** @start: Start of cursor */
	u64 start;
	/** @size: Size of the current segment. */
	u64 size;
	/** @remaining: Remaining bytes in cursor */
	u64 remaining;
	/** @node: Opaque pointer to the current node */
	void *node;
	/** @mem_type: Memory type */
	u32 mem_type;
	/** @sgl: Scatterlist for cursor */
	struct scatterlist *sgl;
	/** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
	const struct drm_pagemap_device_addr *dma_addr;
	/** @mm: Buddy allocator for VRAM cursor */
	struct drm_buddy *mm;
	/**
	 * @dma_start: DMA start address for the current segment.
	 * This may be different from @dma_addr.addr since elements in
	 * the array may be coalesced to a single segment.
	 */
	u64 dma_start;
	/** @dma_seg_size: Size of the current DMA segment. */
	u64 dma_seg_size;
};
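
/*
 * Typical cursor walk, as an illustrative sketch only ("res", "offset" and
 * "length" below are placeholders, not taken from a real caller):
 *
 *	struct xe_res_cursor cur;
 *
 *	xe_res_first(res, offset, length, &cur);
 *	while (cur.remaining) {
 *		u64 addr = xe_res_dma(&cur);
 *		u64 len = cur.size;
 *
 *		... consume the contiguous chunk [addr, addr + len) ...
 *
 *		xe_res_next(&cur, len);
 *	}
 */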

static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
{
	struct ttm_resource_manager *mgr;

	mgr = ttm_manager_type(res->bo->bdev, res->mem_type);
	return &to_xe_ttm_vram_mgr(mgr)->mm;
}

/**
 * xe_res_first - initialize a xe_res_cursor
 *
 * @res: TTM resource object to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the @size bytes of allocations starting at offset
 * @start.
 */
static inline void xe_res_first(struct ttm_resource *res,
				u64 start, u64 size,
				struct xe_res_cursor *cur)
{
	cur->sgl = NULL;
	cur->dma_addr = NULL;
	if (!res)
		goto fallback;

	XE_WARN_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct drm_buddy_block *block;
		struct list_head *head, *next;
		struct drm_buddy *mm = xe_res_get_buddy(res);

		head = &to_xe_ttm_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		/* Skip the buddy blocks fully covered by @start */
		while (start >= drm_buddy_block_size(mm, block)) {
			start -= drm_buddy_block_size(mm, block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block,
						   link);
		}

		cur->mm = mm;
		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(mm, block) - start,
				size);
		cur->remaining = size;
		cur->node = block;
		break;
	}
	default:
		goto fallback;
	}

	return;

fallback:
	/* Treat the range as one contiguous segment in system memory */
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	cur->mem_type = XE_PL_TT;
	XE_WARN_ON(res && start + size > res->size);
}

/**
 * __xe_res_sg_next() - Advance the cursor to the scatterlist entry
 * containing the current start offset
 * @cur: The cursor
 */
static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
{
	struct scatterlist *sgl = cur->sgl;
	u64 start = cur->start;

	while (start >= sg_dma_len(sgl)) {
		start -= sg_dma_len(sgl);
		sgl = sg_next(sgl);
		XE_WARN_ON(!sgl);
	}

	cur->start = start;
	cur->size = sg_dma_len(sgl) - start;
	cur->sgl = sgl;
}

/**
 * __xe_res_dma_next() - Advance the cursor when end-of-segment is reached
 * @cur: The cursor
 */
static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
{
	const struct drm_pagemap_device_addr *addr = cur->dma_addr;
	u64 start = cur->start;

	while (start >= cur->dma_seg_size) {
		start -= cur->dma_seg_size;
		addr++;
		cur->dma_seg_size = PAGE_SIZE << addr->order;
	}
	cur->dma_start = addr->addr;

	/* Coalesce array elements */
	while (cur->dma_seg_size - start < cur->remaining) {
		if (cur->dma_start + cur->dma_seg_size != addr[1].addr ||
		    addr->proto != addr[1].proto)
			break;
		addr++;
		cur->dma_seg_size += PAGE_SIZE << addr->order;
	}

	cur->dma_addr = addr;
	cur->start = start;
	cur->size = cur->dma_seg_size - start;
}
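
/*
 * Coalescing example (illustrative, assuming PAGE_SIZE == SZ_4K): two
 * order-0 entries with addresses 0x1000 and 0x2000 and the same @proto are
 * physically contiguous, so the coalescing loop above merges them into one
 * segment with @dma_start == 0x1000 and @dma_seg_size == 0x2000.
 */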

/**
 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
 *
 * @sg: scatter gather table to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the @size bytes of dma mappings starting at offset
 * @start.
 */
static inline void xe_res_first_sg(const struct sg_table *sg,
				   u64 start, u64 size,
				   struct xe_res_cursor *cur)
{
	XE_WARN_ON(!sg);
	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->size = 0;
	cur->dma_addr = NULL;
	cur->sgl = sg->sgl;
	cur->mem_type = XE_PL_TT;
	__xe_res_sg_next(cur);
}
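
/*
 * Illustrative sketch: walking a dma-mapped sg_table ("sgt" and "size" are
 * placeholders, e.g. the table backing a dma-mapped userptr):
 *
 *	struct xe_res_cursor cur;
 *
 *	xe_res_first_sg(sgt, 0, size, &cur);
 *	while (cur.remaining) {
 *		... xe_res_dma(&cur) is the segment start, cur.size its length ...
 *		xe_res_next(&cur, cur.size);
 *	}
 */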

/**
 * xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
 *
 * @dma_addr: struct drm_pagemap_device_addr array to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the @size bytes of device addresses starting at offset
 * @start.
 */
static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
				    u64 start, u64 size,
				    struct xe_res_cursor *cur)
{
	XE_WARN_ON(!dma_addr);
	XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
		   !IS_ALIGNED(size, PAGE_SIZE));

	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
	cur->dma_start = 0;
	cur->size = 0;
	cur->dma_addr = dma_addr;
	__xe_res_dma_next(cur);
	cur->sgl = NULL;
	cur->mem_type = XE_PL_TT;
}
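
/*
 * Illustrative sketch: walking a struct drm_pagemap_device_addr array
 * ("addrs" and "npages" are placeholders, e.g. the per-page device
 * addresses of an SVM range):
 *
 *	struct xe_res_cursor cur;
 *
 *	xe_res_first_dma(addrs, 0, (u64)npages << PAGE_SHIFT, &cur);
 *	while (cur.remaining) {
 *		... xe_res_dma(&cur) is the segment start, cur.size its length ...
 *		xe_res_next(&cur, cur.size);
 *	}
 */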

/**
 * xe_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
{
	struct drm_buddy_block *block;
	struct list_head *next;
	u64 start;

	XE_WARN_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	/* The advance stays within the current segment */
	if (cur->size > size) {
		cur->size -= size;
		cur->start += size;
		return;
	}

	if (cur->dma_addr) {
		cur->start += size;
		__xe_res_dma_next(cur);
		return;
	}

	if (cur->sgl) {
		cur->start += size;
		__xe_res_sg_next(cur);
		return;
	}

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		/* Walk to the buddy block containing the new position */
		start = size - cur->size;
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		while (start >= drm_buddy_block_size(cur->mm, block)) {
			start -= drm_buddy_block_size(cur->mm, block);

			next = block->link.next;
			block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
				cur->remaining);
		cur->node = block;
		break;
	default:
		return;
	}
}

/**
 * xe_res_dma - return dma address of cursor at current position
 *
 * @cur: the cursor to return the dma address from
 *
 * Return: the DMA address at the cursor position, or the offset within
 * VRAM/stolen when walking a VRAM resource.
 */
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
	if (cur->dma_addr)
		return cur->dma_start + cur->start;
	else if (cur->sgl)
		return sg_dma_address(cur->sgl) + cur->start;
	else
		return cur->start;
}

/**
 * xe_res_is_vram() - Whether the cursor's current dma address points to
 * same-device VRAM
 * @cur: The cursor.
 *
 * Return: true iff the address returned by xe_res_dma() points to internal vram.
 */
static inline bool xe_res_is_vram(const struct xe_res_cursor *cur)
{
	if (cur->dma_addr)
		return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		return true;
	default:
		break;
	}

	return false;
}
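
/*
 * Illustrative use, as a sketch only (the PTE encoding step is a
 * placeholder, not this header's API):
 *
 *	u64 addr = xe_res_dma(&cur);
 *
 *	if (xe_res_is_vram(&cur))
 *		... encode addr as a device-local VRAM PTE ...
 *	else
 *		... encode addr as a system-memory DMA address ...
 */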
#endif