1 /*
2 * Copyright 2009 Jerome Glisse.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26 /*
27 * Authors:
28 * Jerome Glisse <glisse@freedesktop.org>
29 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
30 * Dave Airlie
31 */
32
33 #include <linux/dma-mapping.h>
34 #include <linux/iommu.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched/task.h>
37 #include <linux/sched/mm.h>
38 #include <linux/seq_file.h>
39 #include <linux/slab.h>
40 #include <linux/swap.h>
41 #include <linux/dma-buf.h>
42 #include <linux/sizes.h>
43 #include <linux/module.h>
44
45 #include <drm/drm_drv.h>
46 #include <drm/ttm/ttm_bo.h>
47 #include <drm/ttm/ttm_placement.h>
48 #include <drm/ttm/ttm_range_manager.h>
49 #include <drm/ttm/ttm_tt.h>
50
51 #include <drm/amdgpu_drm.h>
52
53 #include "amdgpu.h"
54 #include "amdgpu_object.h"
55 #include "amdgpu_trace.h"
56 #include "amdgpu_amdkfd.h"
57 #include "amdgpu_sdma.h"
58 #include "amdgpu_ras.h"
59 #include "amdgpu_hmm.h"
60 #include "amdgpu_atomfirmware.h"
61 #include "amdgpu_res_cursor.h"
62 #include "bif/bif_4_1_d.h"
63
64 MODULE_IMPORT_NS("DMA_BUF");
65
66 #define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
67
68 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
69 struct ttm_tt *ttm,
70 struct ttm_resource *bo_mem);
71 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
72 struct ttm_tt *ttm);
73
74 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
75 unsigned int type,
76 uint64_t size_in_page)
77 {
78 return ttm_range_man_init(&adev->mman.bdev, type,
79 false, size_in_page);
80 }
81
82 /**
83 * amdgpu_evict_flags - Compute placement flags
84 *
85 * @bo: The buffer object to evict
86 * @placement: Possible destination(s) for evicted BO
87 *
88 * Fill in placement data when ttm_bo_evict() is called
89 */
90 static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
91 struct ttm_placement *placement)
92 {
93 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
94 struct amdgpu_bo *abo;
95 static const struct ttm_place placements = {
96 .fpfn = 0,
97 .lpfn = 0,
98 .mem_type = TTM_PL_SYSTEM,
99 .flags = 0
100 };
101
102 /* Don't handle scatter gather BOs */
103 if (bo->type == ttm_bo_type_sg) {
104 placement->num_placement = 0;
105 return;
106 }
107
108 /* Object isn't an AMDGPU object so ignore */
109 if (!amdgpu_bo_is_amdgpu_bo(bo)) {
110 placement->placement = &placements;
111 placement->num_placement = 1;
112 return;
113 }
114
115 abo = ttm_to_amdgpu_bo(bo);
116 if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
117 placement->num_placement = 0;
118 return;
119 }
120
121 switch (bo->resource->mem_type) {
122 case AMDGPU_PL_GDS:
123 case AMDGPU_PL_GWS:
124 case AMDGPU_PL_OA:
125 case AMDGPU_PL_DOORBELL:
126 case AMDGPU_PL_MMIO_REMAP:
127 placement->num_placement = 0;
128 return;
129
130 case TTM_PL_VRAM:
131 if (!adev->mman.buffer_funcs_enabled) {
132 /* Move to system memory */
133 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
134
135 } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
136 !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
137 amdgpu_res_cpu_visible(adev, bo->resource)) {
138
139 /* Try evicting to the CPU inaccessible part of VRAM
140 * first, but only set GTT as busy placement, so this
141 * BO will be evicted to GTT rather than causing other
142 * BOs to be evicted from VRAM
143 */
144 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
145 AMDGPU_GEM_DOMAIN_GTT |
146 AMDGPU_GEM_DOMAIN_CPU);
147 abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
148 abo->placements[0].lpfn = 0;
149 abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
150 } else {
151 /* Move to GTT memory */
152 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
153 AMDGPU_GEM_DOMAIN_CPU);
154 }
155 break;
156 case TTM_PL_TT:
157 case AMDGPU_PL_PREEMPT:
158 default:
159 amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
160 break;
161 }
162 *placement = abo->placement;
163 }
164
165 static struct dma_fence *
166 amdgpu_ttm_job_submit(struct amdgpu_device *adev, struct amdgpu_job *job, u32 num_dw)
167 {
168 struct amdgpu_ring *ring;
169
170 ring = adev->mman.buffer_funcs_ring;
171 amdgpu_ring_pad_ib(ring, &job->ibs[0]);
172 WARN_ON(job->ibs[0].length_dw > num_dw);
173
174 return amdgpu_job_submit(job);
175 }
176
177 /**
178 * amdgpu_ttm_map_buffer - Map memory into the GART windows
179 * @entity: entity to run the window setup job
180 * @bo: buffer object to map
181 * @mem: memory object to map
182 * @mm_cur: range to map
183 * @window: which GART window to use
184 * @tmz: if we should setup a TMZ enabled mapping
185 * @size: in number of bytes to map, out number of bytes mapped
186 * @addr: resulting address inside the MC address space
187 *
188 * Set up one of the GART windows to access a specific piece of memory or return
189 * the physical address for local memory.
190 */
191 static int amdgpu_ttm_map_buffer(struct amdgpu_ttm_buffer_entity *entity,
192 struct ttm_buffer_object *bo,
193 struct ttm_resource *mem,
194 struct amdgpu_res_cursor *mm_cur,
195 unsigned int window,
196 bool tmz, uint64_t *size, uint64_t *addr)
197 {
198 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
199 unsigned int offset, num_pages, num_dw, num_bytes;
200 uint64_t src_addr, dst_addr;
201 struct amdgpu_job *job;
202 void *cpu_addr;
203 uint64_t flags;
204 int r;
205
206 BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
207 AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
208
209 if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
210 return -EINVAL;
211
212 /* Map only what can't be accessed directly */
213 if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
214 *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
215 mm_cur->start;
216 return 0;
217 }
218
219
220 /*
221 * If start begins at an offset inside the page, then adjust the size
222 * and addr accordingly
223 */
224 offset = mm_cur->start & ~PAGE_MASK;
225
226 num_pages = PFN_UP(*size + offset);
227 num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
228
229 *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
230
231 *addr = adev->gmc.gart_start;
232 *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
233 AMDGPU_GPU_PAGE_SIZE;
234 *addr += offset;
235
236 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
237 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
238
239 r = amdgpu_job_alloc_with_ib(adev, &entity->base,
240 AMDGPU_FENCE_OWNER_UNDEFINED,
241 num_dw * 4 + num_bytes,
242 AMDGPU_IB_POOL_DELAYED, &job,
243 AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
244 if (r)
245 return r;
246
247 src_addr = num_dw * 4;
248 src_addr += job->ibs[0].gpu_addr;
249
250 dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
251 dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
252 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
253 dst_addr, num_bytes, 0);
254
255 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
256 if (tmz)
257 flags |= AMDGPU_PTE_TMZ;
258
259 cpu_addr = &job->ibs[0].ptr[num_dw];
260
261 if (mem->mem_type == TTM_PL_TT) {
262 dma_addr_t *dma_addr;
263
264 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
265 amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
266 } else {
267 u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
268
269 amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
270 }
271
272 dma_fence_put(amdgpu_ttm_job_submit(adev, job, num_dw));
273 return 0;
274 }
275
276 /**
277 * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
278 * @adev: amdgpu device
279 * @entity: entity to run the jobs
280 * @src: buffer/address where to read from
281 * @dst: buffer/address where to write to
282 * @size: number of bytes to copy
283 * @tmz: if a secure copy should be used
284 * @resv: resv object to sync to
285 * @f: Returns the last fence if multiple jobs are submitted.
286 *
287 * The function copies @size bytes from {src->mem + src->offset} to
288 * {dst->mem + dst->offset}. src->bo and dst->bo could be the same BO for a
289 * move and different for a BO to BO copy.
290 *
291 */
292 __attribute__((nonnull))
293 static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
294 struct amdgpu_ttm_buffer_entity *entity,
295 const struct amdgpu_copy_mem *src,
296 const struct amdgpu_copy_mem *dst,
297 uint64_t size, bool tmz,
298 struct dma_resv *resv,
299 struct dma_fence **f)
300 {
301 struct amdgpu_res_cursor src_mm, dst_mm;
302 struct dma_fence *fence = NULL;
303 int r = 0;
304 uint32_t copy_flags = 0;
305 struct amdgpu_bo *abo_src, *abo_dst;
306
307 if (!adev->mman.buffer_funcs_enabled) {
308 dev_err(adev->dev,
309 "Trying to move memory with ring turned off.\n");
310 return -EINVAL;
311 }
312
313 amdgpu_res_first(src->mem, src->offset, size, &src_mm);
314 amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
315
316 mutex_lock(&adev->mman.gtt_window_lock);
317 while (src_mm.remaining) {
318 uint64_t from, to, cur_size, tiling_flags;
319 uint32_t num_type, data_format, max_com, write_compress_disable;
320 struct dma_fence *next;
321
322 /* Never copy more than 256MiB at once to avoid a timeout */
323 cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
324
325 /* Map src to window 0 and dst to window 1. */
326 r = amdgpu_ttm_map_buffer(entity, src->bo, src->mem, &src_mm,
327 0, tmz, &cur_size, &from);
328 if (r)
329 goto error;
330
331 r = amdgpu_ttm_map_buffer(entity, dst->bo, dst->mem, &dst_mm,
332 1, tmz, &cur_size, &to);
333 if (r)
334 goto error;
335
336 abo_src = ttm_to_amdgpu_bo(src->bo);
337 abo_dst = ttm_to_amdgpu_bo(dst->bo);
338 if (tmz)
339 copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
340 if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
341 (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
342 copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
343 if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
344 (dst->mem->mem_type == TTM_PL_VRAM)) {
345 copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
346 amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
347 max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
348 num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
349 data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
350 write_compress_disable =
351 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
352 copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
353 AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
354 AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
355 AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
356 write_compress_disable));
357 }
358
359 r = amdgpu_copy_buffer(adev, entity, from, to, cur_size, resv,
360 &next, true, copy_flags);
361 if (r)
362 goto error;
363
364 dma_fence_put(fence);
365 fence = next;
366
367 amdgpu_res_next(&src_mm, cur_size);
368 amdgpu_res_next(&dst_mm, cur_size);
369 }
370 error:
371 mutex_unlock(&adev->mman.gtt_window_lock);
372 *f = fence;
373 return r;
374 }
375
376 /*
377 * amdgpu_move_blit - Copy an entire buffer to another buffer
378 *
379 * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
380 * help move buffers to and from VRAM.
381 */
382 static int amdgpu_move_blit(struct ttm_buffer_object *bo,
383 bool evict,
384 struct ttm_resource *new_mem,
385 struct ttm_resource *old_mem)
386 {
387 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
388 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
389 struct amdgpu_copy_mem src, dst;
390 struct dma_fence *fence = NULL;
391 int r;
392
393 src.bo = bo;
394 dst.bo = bo;
395 src.mem = old_mem;
396 dst.mem = new_mem;
397 src.offset = 0;
398 dst.offset = 0;
399
400 r = amdgpu_ttm_copy_mem_to_mem(adev,
401 &adev->mman.move_entity,
402 &src, &dst,
403 new_mem->size,
404 amdgpu_bo_encrypted(abo),
405 bo->base.resv, &fence);
406 if (r)
407 goto error;
408
409 /* clear the space being freed */
410 if (old_mem->mem_type == TTM_PL_VRAM &&
411 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
412 struct dma_fence *wipe_fence = NULL;
413
414 r = amdgpu_fill_buffer(&adev->mman.move_entity,
415 abo, 0, NULL, &wipe_fence,
416 AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
417 if (r) {
418 goto error;
419 } else if (wipe_fence) {
420 amdgpu_vram_mgr_set_cleared(bo->resource);
421 dma_fence_put(fence);
422 fence = wipe_fence;
423 }
424 }
425
426 /* Always block for VM page tables before committing the new location */
427 if (bo->type == ttm_bo_type_kernel)
428 r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
429 else
430 r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
431 dma_fence_put(fence);
432 return r;
433
434 error:
435 if (fence)
436 dma_fence_wait(fence, false);
437 dma_fence_put(fence);
438 return r;
439 }
440
441 /**
442 * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
443 * @adev: amdgpu device
444 * @res: the resource to check
445 *
446 * Returns: true if the full resource is CPU visible, false otherwise.
447 */
448 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
449 struct ttm_resource *res)
450 {
451 struct amdgpu_res_cursor cursor;
452
453 if (!res)
454 return false;
455
456 if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
457 res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
458 res->mem_type == AMDGPU_PL_MMIO_REMAP)
459 return true;
460
461 if (res->mem_type != TTM_PL_VRAM)
462 return false;
463
464 amdgpu_res_first(res, 0, res->size, &cursor);
465 while (cursor.remaining) {
466 if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
467 return false;
468 amdgpu_res_next(&cursor, cursor.size);
469 }
470
471 return true;
472 }
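
/*
 * Worked example (illustrative numbers only): with visible_vram_size of
 * 256 MiB, a VRAM resource made of two chunks at [0, 64 MiB) and
 * [300 MiB, 308 MiB) fails the check on the second chunk, so the whole
 * resource is reported as not CPU visible even though part of it lies
 * inside the CPU-visible window.
 */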
473
474 /*
475 * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
476 *
477 * Called by amdgpu_bo_move()
478 */
479 static bool amdgpu_res_copyable(struct amdgpu_device *adev,
480 struct ttm_resource *mem)
481 {
482 if (!amdgpu_res_cpu_visible(adev, mem))
483 return false;
484
485 /* ttm_resource_ioremap only supports contiguous memory */
486 if (mem->mem_type == TTM_PL_VRAM &&
487 !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
488 return false;
489
490 return true;
491 }
492
493 /*
494 * amdgpu_bo_move - Move a buffer object to a new memory location
495 *
496 * Called by ttm_bo_handle_move_mem()
497 */
498 static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
499 struct ttm_operation_ctx *ctx,
500 struct ttm_resource *new_mem,
501 struct ttm_place *hop)
502 {
503 struct amdgpu_device *adev;
504 struct amdgpu_bo *abo;
505 struct ttm_resource *old_mem = bo->resource;
506 int r;
507
508 if (new_mem->mem_type == TTM_PL_TT ||
509 new_mem->mem_type == AMDGPU_PL_PREEMPT) {
510 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
511 if (r)
512 return r;
513 }
514
515 abo = ttm_to_amdgpu_bo(bo);
516 adev = amdgpu_ttm_adev(bo->bdev);
517
518 if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
519 bo->ttm == NULL)) {
520 amdgpu_bo_move_notify(bo, evict, new_mem);
521 ttm_bo_move_null(bo, new_mem);
522 return 0;
523 }
524 if (old_mem->mem_type == TTM_PL_SYSTEM &&
525 (new_mem->mem_type == TTM_PL_TT ||
526 new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
527 amdgpu_bo_move_notify(bo, evict, new_mem);
528 ttm_bo_move_null(bo, new_mem);
529 return 0;
530 }
531 if ((old_mem->mem_type == TTM_PL_TT ||
532 old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
533 new_mem->mem_type == TTM_PL_SYSTEM) {
534 r = ttm_bo_wait_ctx(bo, ctx);
535 if (r)
536 return r;
537
538 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
539 amdgpu_bo_move_notify(bo, evict, new_mem);
540 ttm_resource_free(bo, &bo->resource);
541 ttm_bo_assign_mem(bo, new_mem);
542 return 0;
543 }
544
545 if (old_mem->mem_type == AMDGPU_PL_GDS ||
546 old_mem->mem_type == AMDGPU_PL_GWS ||
547 old_mem->mem_type == AMDGPU_PL_OA ||
548 old_mem->mem_type == AMDGPU_PL_DOORBELL ||
549 old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
550 new_mem->mem_type == AMDGPU_PL_GDS ||
551 new_mem->mem_type == AMDGPU_PL_GWS ||
552 new_mem->mem_type == AMDGPU_PL_OA ||
553 new_mem->mem_type == AMDGPU_PL_DOORBELL ||
554 new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
555 /* Nothing to save here */
556 amdgpu_bo_move_notify(bo, evict, new_mem);
557 ttm_bo_move_null(bo, new_mem);
558 return 0;
559 }
560
561 if (bo->type == ttm_bo_type_device &&
562 new_mem->mem_type == TTM_PL_VRAM &&
563 old_mem->mem_type != TTM_PL_VRAM) {
564 /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
565 * accesses the BO after it's moved.
566 */
567 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
568 }
569
570 if (adev->mman.buffer_funcs_enabled &&
571 ((old_mem->mem_type == TTM_PL_SYSTEM &&
572 new_mem->mem_type == TTM_PL_VRAM) ||
573 (old_mem->mem_type == TTM_PL_VRAM &&
574 new_mem->mem_type == TTM_PL_SYSTEM))) {
575 hop->fpfn = 0;
576 hop->lpfn = 0;
577 hop->mem_type = TTM_PL_TT;
578 hop->flags = TTM_PL_FLAG_TEMPORARY;
579 return -EMULTIHOP;
580 }
581
582 amdgpu_bo_move_notify(bo, evict, new_mem);
583 if (adev->mman.buffer_funcs_enabled)
584 r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
585 else
586 r = -ENODEV;
587
588 if (r) {
589 /* Check that all memory is CPU accessible */
590 if (!amdgpu_res_copyable(adev, old_mem) ||
591 !amdgpu_res_copyable(adev, new_mem)) {
592 pr_err("Move buffer fallback to memcpy unavailable\n");
593 return r;
594 }
595
596 r = ttm_bo_move_memcpy(bo, ctx, new_mem);
597 if (r)
598 return r;
599 }
600
601 /* update statistics after the move */
602 if (evict)
603 atomic64_inc(&adev->num_evictions);
604 atomic64_add(bo->base.size, &adev->num_bytes_moved);
605 return 0;
606 }
607
608 /*
609 * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
610 *
611 * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
612 */
613 static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
614 struct ttm_resource *mem)
615 {
616 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
617
618 switch (mem->mem_type) {
619 case TTM_PL_SYSTEM:
620 /* system memory */
621 return 0;
622 case TTM_PL_TT:
623 case AMDGPU_PL_PREEMPT:
624 break;
625 case TTM_PL_VRAM:
626 mem->bus.offset = mem->start << PAGE_SHIFT;
627
628 if (adev->mman.aper_base_kaddr &&
629 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
630 mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
631 mem->bus.offset;
632
633 mem->bus.offset += adev->gmc.aper_base;
634 mem->bus.is_iomem = true;
635 break;
636 case AMDGPU_PL_DOORBELL:
637 mem->bus.offset = mem->start << PAGE_SHIFT;
638 mem->bus.offset += adev->doorbell.base;
639 mem->bus.is_iomem = true;
640 mem->bus.caching = ttm_uncached;
641 break;
642 case AMDGPU_PL_MMIO_REMAP:
643 mem->bus.offset = mem->start << PAGE_SHIFT;
644 mem->bus.offset += adev->rmmio_remap.bus_addr;
645 mem->bus.is_iomem = true;
646 mem->bus.caching = ttm_uncached;
647 break;
648 default:
649 return -EINVAL;
650 }
651 return 0;
652 }
653
654 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
655 unsigned long page_offset)
656 {
657 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
658 struct amdgpu_res_cursor cursor;
659
660 amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
661 &cursor);
662
663 if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
664 return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
665 else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
666 return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
667
668 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
669 }
670
671 /**
672 * amdgpu_ttm_domain_start - Returns GPU start address
673 * @adev: amdgpu device object
674 * @type: type of the memory
675 *
676 * Returns:
677 * GPU start address of a memory domain
678 */
679
680 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
681 {
682 switch (type) {
683 case TTM_PL_TT:
684 return adev->gmc.gart_start;
685 case TTM_PL_VRAM:
686 return adev->gmc.vram_start;
687 }
688
689 return 0;
690 }
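
/*
 * Usage sketch (mirrors the fast path in amdgpu_ttm_map_buffer() above): a
 * directly addressable resource is translated into an MC address by adding
 * the cursor offset to the domain base:
 *
 *	addr = amdgpu_ttm_domain_start(adev, mem->mem_type) + mm_cur->start;
 */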
691
692 /*
693 * TTM backend functions.
694 */
695 struct amdgpu_ttm_tt {
696 struct ttm_tt ttm;
697 struct drm_gem_object *gobj;
698 u64 offset;
699 uint64_t userptr;
700 struct task_struct *usertask;
701 uint32_t userflags;
702 bool bound;
703 int32_t pool_id;
704 };
705
706 #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
707
708 #ifdef CONFIG_DRM_AMDGPU_USERPTR
709 /*
710 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
711 * memory and start HMM tracking of CPU page table updates
712 *
713 * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
714 * once afterwards to stop HMM tracking. It is the caller's responsibility to
715 * ensure that the range points to valid memory and that it is freed afterwards.
716 */
717 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
718 struct amdgpu_hmm_range *range)
719 {
720 struct ttm_tt *ttm = bo->tbo.ttm;
721 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
722 unsigned long start = gtt->userptr;
723 struct vm_area_struct *vma;
724 struct mm_struct *mm;
725 bool readonly;
726 int r = 0;
727
728 mm = bo->notifier.mm;
729 if (unlikely(!mm)) {
730 DRM_DEBUG_DRIVER("BO is not registered?\n");
731 return -EFAULT;
732 }
733
734 if (!mmget_not_zero(mm)) /* Happens during process shutdown */
735 return -ESRCH;
736
737 mmap_read_lock(mm);
738 vma = vma_lookup(mm, start);
739 if (unlikely(!vma)) {
740 r = -EFAULT;
741 goto out_unlock;
742 }
743 if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
744 vma->vm_file)) {
745 r = -EPERM;
746 goto out_unlock;
747 }
748
749 readonly = amdgpu_ttm_tt_is_readonly(ttm);
750 r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
751 readonly, NULL, range);
752 out_unlock:
753 mmap_read_unlock(mm);
754 if (r)
755 pr_debug("failed %d to get user pages 0x%lx\n", r, start);
756
757 mmput(mm);
758
759 return r;
760 }
761
762 #endif
763
764 /*
765 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
766 *
767 * Called by amdgpu_cs_list_validate(). This creates the page list
768 * that backs user memory and will ultimately be mapped into the device
769 * address space.
770 */
771 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
772 {
773 unsigned long i;
774
775 for (i = 0; i < ttm->num_pages; ++i)
776 ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
777 }
778
779 /*
780 * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
781 *
782 * Called by amdgpu_ttm_backend_bind()
783 **/
784 static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
785 struct ttm_tt *ttm)
786 {
787 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
788 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
789 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
790 enum dma_data_direction direction = write ?
791 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
792 int r;
793
794 /* Allocate an SG array and squash pages into it */
795 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
796 (u64)ttm->num_pages << PAGE_SHIFT,
797 GFP_KERNEL);
798 if (r)
799 goto release_sg;
800
801 /* Map SG to device */
802 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
803 if (r)
804 goto release_sg_table;
805
806 /* convert SG to linear array of pages and dma addresses */
807 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
808 ttm->num_pages);
809
810 return 0;
811
812 release_sg_table:
813 sg_free_table(ttm->sg);
814 release_sg:
815 kfree(ttm->sg);
816 ttm->sg = NULL;
817 return r;
818 }
819
820 /*
821 * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
822 */
823 static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
824 struct ttm_tt *ttm)
825 {
826 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
827 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
828 int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
829 enum dma_data_direction direction = write ?
830 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
831
832 /* double check that we don't free the table twice */
833 if (!ttm->sg || !ttm->sg->sgl)
834 return;
835
836 /* unmap the pages mapped to the device */
837 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
838 sg_free_table(ttm->sg);
839 }
840
841 /*
842 * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
843 * MQDn+CtrlStackn where n is the number of XCCs per partition.
844 * pages_per_xcc is the size of one MQD+CtrlStack chunk. The first page of
845 * each chunk is the MQD and keeps the default memory type (UC); the rest of
846 * the pages hold the control stack and have their memory type changed to NC.
847 */
848 static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
849 struct ttm_tt *ttm, uint64_t flags)
850 {
851 struct amdgpu_ttm_tt *gtt = (void *)ttm;
852 uint64_t total_pages = ttm->num_pages;
853 int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
854 uint64_t page_idx, pages_per_xcc;
855 int i;
856
857 pages_per_xcc = total_pages;
858 do_div(pages_per_xcc, num_xcc);
859
860 for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
861 amdgpu_gart_map_gfx9_mqd(adev,
862 gtt->offset + (page_idx << PAGE_SHIFT),
863 pages_per_xcc, &gtt->ttm.dma_address[page_idx],
864 flags);
865 }
866 }
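
/*
 * Worked example (illustrative numbers only): with num_xcc = 4 and
 * ttm->num_pages = 16, pages_per_xcc = 4 and the loop issues four
 * amdgpu_gart_map_gfx9_mqd() calls at page_idx 0, 4, 8 and 12, each covering
 * one MQD page followed by three control-stack pages.
 */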
867
868 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
869 struct ttm_buffer_object *tbo,
870 uint64_t flags)
871 {
872 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
873 struct ttm_tt *ttm = tbo->ttm;
874 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
875
876 if (amdgpu_bo_encrypted(abo))
877 flags |= AMDGPU_PTE_TMZ;
878
879 if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
880 amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
881 } else {
882 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
883 gtt->ttm.dma_address, flags);
884 }
885 gtt->bound = true;
886 }
887
888 /*
889 * amdgpu_ttm_backend_bind - Bind GTT memory
890 *
891 * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
892 * This handles binding GTT memory to the device address space.
893 */
894 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
895 struct ttm_tt *ttm,
896 struct ttm_resource *bo_mem)
897 {
898 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
899 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
900 uint64_t flags;
901 int r;
902
903 if (!bo_mem)
904 return -EINVAL;
905
906 if (gtt->bound)
907 return 0;
908
909 if (gtt->userptr) {
910 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
911 if (r) {
912 dev_err(adev->dev, "failed to pin userptr\n");
913 return r;
914 }
915 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
916 if (!ttm->sg) {
917 struct dma_buf_attachment *attach;
918 struct sg_table *sgt;
919
920 attach = gtt->gobj->import_attach;
921 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
922 if (IS_ERR(sgt))
923 return PTR_ERR(sgt);
924
925 ttm->sg = sgt;
926 }
927
928 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
929 ttm->num_pages);
930 }
931
932 if (!ttm->num_pages) {
933 WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
934 ttm->num_pages, bo_mem, ttm);
935 }
936
937 if (bo_mem->mem_type != TTM_PL_TT ||
938 !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
939 gtt->offset = AMDGPU_BO_INVALID_OFFSET;
940 return 0;
941 }
942
943 /* compute PTE flags relevant to this BO memory */
944 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
945
946 /* bind pages into GART page tables */
947 gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
948 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
949 gtt->ttm.dma_address, flags);
950 gtt->bound = true;
951 return 0;
952 }
953
954 /*
955 * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
956 * through AGP or GART aperture.
957 *
958 * If bo is accessible through AGP aperture, then use AGP aperture
959 * to access bo; otherwise allocate logical space in GART aperture
960 * and map bo to GART aperture.
961 */
962 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
963 {
964 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
965 struct ttm_operation_ctx ctx = { false, false };
966 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
967 struct ttm_placement placement;
968 struct ttm_place placements;
969 struct ttm_resource *tmp;
970 uint64_t addr, flags;
971 int r;
972
973 if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
974 return 0;
975
976 addr = amdgpu_gmc_agp_addr(bo);
977 if (addr != AMDGPU_BO_INVALID_OFFSET)
978 return 0;
979
980 /* allocate GART space */
981 placement.num_placement = 1;
982 placement.placement = &placements;
983 placements.fpfn = 0;
984 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
985 placements.mem_type = TTM_PL_TT;
986 placements.flags = bo->resource->placement;
987
988 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
989 if (unlikely(r))
990 return r;
991
992 /* compute PTE flags for this buffer object */
993 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
994
995 /* Bind pages */
996 gtt->offset = (u64)tmp->start << PAGE_SHIFT;
997 amdgpu_ttm_gart_bind(adev, bo, flags);
998 amdgpu_gart_invalidate_tlb(adev);
999 ttm_resource_free(bo, &bo->resource);
1000 ttm_bo_assign_mem(bo, tmp);
1001
1002 return 0;
1003 }
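
/*
 * Usage sketch (simplified; reservation and error handling omitted): kernel
 * BOs that need a stable GPU address typically pin first and then make sure
 * a GART/AGP address exists before querying the offset:
 *
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	if (!r)
 *		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 */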
1004
1005 /*
1006 * amdgpu_ttm_recover_gart - Rebind GTT pages
1007 *
1008 * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
1009 * rebind GTT pages during a GPU reset.
1010 */
1011 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
1012 {
1013 struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
1014 uint64_t flags;
1015
1016 if (!tbo->ttm)
1017 return;
1018
1019 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
1020 amdgpu_ttm_gart_bind(adev, tbo, flags);
1021 }
1022
1023 /*
1024 * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
1025 *
1026 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
1027 * ttm_tt_destroy().
1028 */
1029 static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
1030 struct ttm_tt *ttm)
1031 {
1032 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1033 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1034
1035 /* if the pages have userptr pinning then clear that first */
1036 if (gtt->userptr) {
1037 amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
1038 } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
1039 struct dma_buf_attachment *attach;
1040
1041 attach = gtt->gobj->import_attach;
1042 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
1043 ttm->sg = NULL;
1044 }
1045
1046 if (!gtt->bound)
1047 return;
1048
1049 if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
1050 return;
1051
1052 /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
1053 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
1054 gtt->bound = false;
1055 }
1056
1057 static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
1058 struct ttm_tt *ttm)
1059 {
1060 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1061
1062 if (gtt->usertask)
1063 put_task_struct(gtt->usertask);
1064
1065 ttm_tt_fini(&gtt->ttm);
1066 kfree(gtt);
1067 }
1068
1069 /**
1070 * amdgpu_ttm_mmio_remap_alloc_sgt - build an sg_table for MMIO_REMAP I/O aperture
1071 * @adev: amdgpu device providing the remap BAR base (adev->rmmio_remap.bus_addr)
1072 * @res: TTM resource of the BO to export; expected to live in AMDGPU_PL_MMIO_REMAP
1073 * @dev: importing device to map for (typically @attach->dev in dma-buf paths)
1074 * @dir: DMA data direction for the importer (passed to dma_map_resource())
1075 * @sgt: output; on success, set to a newly allocated sg_table describing the I/O span
1076 *
1077 * The HDP flush page (AMDGPU_PL_MMIO_REMAP) is a fixed hardware I/O window in a PCI
1078 * BAR—there are no struct pages to back it. Importers still need a DMA address list,
1079 * so we synthesize a minimal sg_table and populate it from dma_map_resource(), not
1080 * from pages. Using the common amdgpu_res_cursor walker keeps the offset/size math
1081 * consistent with other TTM/manager users.
1082 *
1083 * - @res is assumed to be a small, contiguous I/O region (typically a single 4 KiB
1084 * page) in AMDGPU_PL_MMIO_REMAP. Callers should validate placement before calling.
1085 * - The sg entry is created with sg_set_page(sg, NULL, …) to reflect I/O space.
1086 * - The mapping uses DMA_ATTR_SKIP_CPU_SYNC because this is MMIO, not cacheable RAM.
1087 * - Peer reachability / p2pdma policy checks must be done by the caller.
1088 *
1089 * Return:
1090 * * 0 on success, with *@sgt set to a valid table that must be freed via
1091 * amdgpu_ttm_mmio_remap_free_sgt().
1092 * * -ENOMEM if allocation of the sg_table fails.
1093 * * -EIO if dma_map_resource() fails.
1094 *
1095 */
1096 int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
1097 struct ttm_resource *res,
1098 struct device *dev,
1099 enum dma_data_direction dir,
1100 struct sg_table **sgt)
1101 {
1102 struct amdgpu_res_cursor cur;
1103 dma_addr_t dma;
1104 resource_size_t phys;
1105 struct scatterlist *sg;
1106 int r;
1107
1108 /* Walk the resource once; MMIO_REMAP is expected to be contiguous+small. */
1109 amdgpu_res_first(res, 0, res->size, &cur);
1110
1111 /* Translate byte offset in the remap window into a host physical BAR address. */
1112 phys = adev->rmmio_remap.bus_addr + cur.start;
1113
1114 /* Build a single-entry sg_table mapped as I/O (no struct page backing). */
1115 *sgt = kzalloc_obj(**sgt);
1116 if (!*sgt)
1117 return -ENOMEM;
1118 r = sg_alloc_table(*sgt, 1, GFP_KERNEL);
1119 if (r) {
1120 kfree(*sgt);
1121 return r;
1122 }
1123 sg = (*sgt)->sgl;
1124 sg_set_page(sg, NULL, cur.size, 0); /* WHY: I/O space → no pages */
1125
1126 dma = dma_map_resource(dev, phys, cur.size, dir, DMA_ATTR_SKIP_CPU_SYNC);
1127 if (dma_mapping_error(dev, dma)) {
1128 sg_free_table(*sgt);
1129 kfree(*sgt);
1130 return -EIO;
1131 }
1132 sg_dma_address(sg) = dma;
1133 sg_dma_len(sg) = cur.size;
1134 return 0;
1135 }
1136
1137 void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
1138 enum dma_data_direction dir,
1139 struct sg_table *sgt)
1140 {
1141 struct scatterlist *sg = sgt->sgl;
1142
1143 dma_unmap_resource(dev, sg_dma_address(sg), sg_dma_len(sg),
1144 dir, DMA_ATTR_SKIP_CPU_SYNC);
1145 sg_free_table(sgt);
1146 kfree(sgt);
1147 }
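
/*
 * Pairing sketch (hypothetical caller, e.g. a dma-buf map/unmap hook; the
 * surrounding names are assumptions, not part of this file):
 *
 *	struct sg_table *sgt;
 *	int r;
 *
 *	r = amdgpu_ttm_mmio_remap_alloc_sgt(adev, bo->tbo.resource,
 *					    attach->dev, dir, &sgt);
 *	if (r)
 *		return ERR_PTR(r);
 *	...
 *	amdgpu_ttm_mmio_remap_free_sgt(attach->dev, dir, sgt);
 */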
1148
1149 /**
1150 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
1151 *
1152 * @bo: The buffer object to create a GTT ttm_tt object around
1153 * @page_flags: Page flags to be added to the ttm_tt object
1154 *
1155 * Called by ttm_tt_create().
1156 */
1157 static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
1158 uint32_t page_flags)
1159 {
1160 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1161 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1162 struct amdgpu_ttm_tt *gtt;
1163 enum ttm_caching caching;
1164
1165 gtt = kzalloc_obj(struct amdgpu_ttm_tt);
1166 if (!gtt)
1167 return NULL;
1168
1169 gtt->gobj = &bo->base;
1170 if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
1171 gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
1172 else
1173 gtt->pool_id = abo->xcp_id;
1174
1175 if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
1176 caching = ttm_write_combined;
1177 else
1178 caching = ttm_cached;
1179
1180 /* allocate space for the uninitialized page entries */
1181 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
1182 kfree(gtt);
1183 return NULL;
1184 }
1185 return &gtt->ttm;
1186 }
1187
1188 /*
1189 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
1190 *
1191 * Map the pages of a ttm_tt object to an address space visible
1192 * to the underlying device.
1193 */
1194 static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
1195 struct ttm_tt *ttm,
1196 struct ttm_operation_ctx *ctx)
1197 {
1198 struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
1199 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1200 struct ttm_pool *pool;
1201 pgoff_t i;
1202 int ret;
1203
1204 /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
1205 if (gtt->userptr) {
1206 ttm->sg = kzalloc_obj(struct sg_table);
1207 if (!ttm->sg)
1208 return -ENOMEM;
1209 return 0;
1210 }
1211
1212 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1213 return 0;
1214
1215 if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1216 pool = &adev->mman.ttm_pools[gtt->pool_id];
1217 else
1218 pool = &adev->mman.bdev.pool;
1219 ret = ttm_pool_alloc(pool, ttm, ctx);
1220 if (ret)
1221 return ret;
1222
1223 for (i = 0; i < ttm->num_pages; ++i)
1224 ttm->pages[i]->mapping = bdev->dev_mapping;
1225
1226 return 0;
1227 }
1228
1229 /*
1230 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
1231 *
1232 * Unmaps pages of a ttm_tt object from the device address space and
1233 * unpopulates the page array backing it.
1234 */
1235 static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
1236 struct ttm_tt *ttm)
1237 {
1238 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1239 struct amdgpu_device *adev;
1240 struct ttm_pool *pool;
1241 pgoff_t i;
1242
1243 amdgpu_ttm_backend_unbind(bdev, ttm);
1244
1245 if (gtt->userptr) {
1246 amdgpu_ttm_tt_set_user_pages(ttm, NULL);
1247 kfree(ttm->sg);
1248 ttm->sg = NULL;
1249 return;
1250 }
1251
1252 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
1253 return;
1254
1255 for (i = 0; i < ttm->num_pages; ++i)
1256 ttm->pages[i]->mapping = NULL;
1257
1258 adev = amdgpu_ttm_adev(bdev);
1259
1260 if (adev->mman.ttm_pools && gtt->pool_id >= 0)
1261 pool = &adev->mman.ttm_pools[gtt->pool_id];
1262 else
1263 pool = &adev->mman.bdev.pool;
1264
1265 return ttm_pool_free(pool, ttm);
1266 }
1267
1268 /**
1269 * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current
1270 * task
1271 *
1272 * @tbo: The ttm_buffer_object that contains the userptr
1273 * @user_addr: The returned value
1274 */
1275 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
1276 uint64_t *user_addr)
1277 {
1278 struct amdgpu_ttm_tt *gtt;
1279
1280 if (!tbo->ttm)
1281 return -EINVAL;
1282
1283 gtt = (void *)tbo->ttm;
1284 *user_addr = gtt->userptr;
1285 return 0;
1286 }
1287
1288 /**
1289 * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
1290 * task
1291 *
1292 * @bo: The ttm_buffer_object to bind this userptr to
1293 * @addr: The address in the current tasks VM space to use
1294 * @flags: Requirements of userptr object.
1295 *
1296 * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
1297 * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
1298 * initialize GPU VM for a KFD process.
1299 */
1300 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
1301 uint64_t addr, uint32_t flags)
1302 {
1303 struct amdgpu_ttm_tt *gtt;
1304
1305 if (!bo->ttm) {
1306 /* TODO: We want a separate TTM object type for userptrs */
1307 bo->ttm = amdgpu_ttm_tt_create(bo, 0);
1308 if (bo->ttm == NULL)
1309 return -ENOMEM;
1310 }
1311
1312 /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
1313 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
1314
1315 gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
1316 gtt->userptr = addr;
1317 gtt->userflags = flags;
1318
1319 if (gtt->usertask)
1320 put_task_struct(gtt->usertask);
1321 gtt->usertask = current->group_leader;
1322 get_task_struct(gtt->usertask);
1323
1324 return 0;
1325 }
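
/*
 * Usage sketch (simplified from the GEM userptr path; locking and error
 * handling omitted, caller-side names are assumptions): the address is only
 * recorded here, the pages are looked up later when the BO is validated:
 *
 *	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 *	...
 *	r = amdgpu_ttm_tt_get_user_pages(bo, &range);   (later, at validation)
 */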
1326
1327 /*
1328 * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object
1329 */
1330 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
1331 {
1332 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1333
1334 if (gtt == NULL)
1335 return NULL;
1336
1337 if (gtt->usertask == NULL)
1338 return NULL;
1339
1340 return gtt->usertask->mm;
1341 }
1342
1343 /*
1344 * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
1345 * address range for the current task.
1346 *
1347 */
1348 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
1349 unsigned long end, unsigned long *userptr)
1350 {
1351 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1352 unsigned long size;
1353
1354 if (gtt == NULL || !gtt->userptr)
1355 return false;
1356
1357 /* Return false if no part of the ttm_tt object lies within
1358 * the range
1359 */
1360 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
1361 if (gtt->userptr > end || gtt->userptr + size <= start)
1362 return false;
1363
1364 if (userptr)
1365 *userptr = gtt->userptr;
1366 return true;
1367 }
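
/*
 * Typical use (sketch of an MMU-notifier style invalidation check;
 * requeue_bo_for_revalidation() is a hypothetical helper): only BOs whose
 * userptr range overlaps the invalidated interval need to be revalidated:
 *
 *	if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end, &userptr))
 *		requeue_bo_for_revalidation(bo);
 */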
1368
1369 /*
1370 * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
1371 */
1372 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
1373 {
1374 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1375
1376 if (gtt == NULL || !gtt->userptr)
1377 return false;
1378
1379 return true;
1380 }
1381
1382 /*
1383 * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
1384 */
1385 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
1386 {
1387 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
1388
1389 if (gtt == NULL)
1390 return false;
1391
1392 return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
1393 }
1394
1395 /**
1396 * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
1397 *
1398 * @ttm: The ttm_tt object to compute the flags for
1399 * @mem: The memory registry backing this ttm_tt object
1400 *
1401 * Figure out the flags to use for a VM PDE (Page Directory Entry).
1402 */
1403 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
1404 {
1405 uint64_t flags = 0;
1406
1407 if (mem && mem->mem_type != TTM_PL_SYSTEM)
1408 flags |= AMDGPU_PTE_VALID;
1409
1410 if (mem && (mem->mem_type == TTM_PL_TT ||
1411 mem->mem_type == AMDGPU_PL_DOORBELL ||
1412 mem->mem_type == AMDGPU_PL_PREEMPT ||
1413 mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
1414 flags |= AMDGPU_PTE_SYSTEM;
1415
1416 if (ttm && ttm->caching == ttm_cached)
1417 flags |= AMDGPU_PTE_SNOOPED;
1418 }
1419
1420 if (mem && mem->mem_type == TTM_PL_VRAM &&
1421 mem->bus.caching == ttm_cached)
1422 flags |= AMDGPU_PTE_SNOOPED;
1423
1424 return flags;
1425 }
1426
1427 /**
1428 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
1429 *
1430 * @adev: amdgpu_device pointer
1431 * @ttm: The ttm_tt object to compute the flags for
1432 * @mem: The memory registry backing this ttm_tt object
1433 *
1434 * Figure out the flags to use for a VM PTE (Page Table Entry).
1435 */
1436 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
1437 struct ttm_resource *mem)
1438 {
1439 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
1440
1441 flags |= adev->gart.gart_pte_flags;
1442 flags |= AMDGPU_PTE_READABLE;
1443
1444 if (!amdgpu_ttm_tt_is_readonly(ttm))
1445 flags |= AMDGPU_PTE_WRITEABLE;
1446
1447 return flags;
1448 }
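
/*
 * Worked example: for a writable, cacheable GTT mapping (TTM_PL_TT with
 * ttm_cached) the PDE helper above contributes AMDGPU_PTE_VALID |
 * AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED, and this function adds
 * AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE plus whatever ASIC specific
 * bits are set in adev->gart.gart_pte_flags.
 */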
1449
1450 /*
1451 * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
1452 * object.
1453 *
1454 * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
1455 * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
1456 * it can find space for a new object and by ttm_bo_force_list_clean() which is
1457 * used to clean out a memory space.
1458 */
1459 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
1460 const struct ttm_place *place)
1461 {
1462 struct dma_resv_iter resv_cursor;
1463 struct dma_fence *f;
1464
1465 if (!amdgpu_bo_is_amdgpu_bo(bo))
1466 return ttm_bo_eviction_valuable(bo, place);
1467
1468 /* Swapout? */
1469 if (bo->resource->mem_type == TTM_PL_SYSTEM)
1470 return true;
1471
1472 if (bo->type == ttm_bo_type_kernel &&
1473 !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
1474 return false;
1475
1476 /* If bo is a KFD BO, check if the bo belongs to the current process.
1477 * If true, then return false as any KFD process needs all its BOs to
1478 * be resident to run successfully
1479 */
1480 dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
1481 DMA_RESV_USAGE_BOOKKEEP, f) {
1482 if (amdkfd_fence_check_mm(f, current->mm) &&
1483 !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
1484 return false;
1485 }
1486
1487 /* Preemptible BOs don't own system resources managed by the
1488 * driver (pages, VRAM, GART space). They point to resources
1489 * owned by someone else (e.g. pageable memory in user mode
1490 * or a DMABuf). They are used in a preemptible context so we
1491 * can guarantee no deadlocks and good QoS in case of MMU
1492 * notifiers or DMABuf move notifiers from the resource owner.
1493 */
1494 if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
1495 return false;
1496
1497 if (bo->resource->mem_type == TTM_PL_TT &&
1498 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
1499 return false;
1500
1501 return ttm_bo_eviction_valuable(bo, place);
1502 }
1503
1504 static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
1505 void *buf, size_t size, bool write)
1506 {
1507 while (size) {
1508 uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
1509 uint64_t bytes = 4 - (pos & 0x3);
1510 uint32_t shift = (pos & 0x3) * 8;
1511 uint32_t mask = 0xffffffff << shift;
1512 uint32_t value = 0;
1513
1514 if (size < bytes) {
1515 mask &= 0xffffffff >> (bytes - size) * 8;
1516 bytes = size;
1517 }
1518
1519 if (mask != 0xffffffff) {
1520 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
1521 if (write) {
1522 value &= ~mask;
1523 value |= (*(uint32_t *)buf << shift) & mask;
1524 amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
1525 } else {
1526 value = (value & mask) >> shift;
1527 memcpy(buf, &value, bytes);
1528 }
1529 } else {
1530 amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
1531 }
1532
1533 pos += bytes;
1534 buf += bytes;
1535 size -= bytes;
1536 }
1537 }
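
/*
 * Worked example: a 2-byte write at an offset of 2 within a dword gives
 * aligned_pos = pos - 2, bytes = 2, shift = 16 and mask = 0xffff0000; the
 * existing dword is read back, the upper 16 bits are replaced with the new
 * data under the mask and the merged dword is written out, so neighbouring
 * bytes are preserved.
 */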
1538
1539 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
1540 unsigned long offset, void *buf,
1541 int len, int write)
1542 {
1543 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1544 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1545 struct amdgpu_res_cursor src_mm;
1546 struct amdgpu_job *job;
1547 struct dma_fence *fence;
1548 uint64_t src_addr, dst_addr;
1549 unsigned int num_dw;
1550 int r, idx;
1551
1552 if (len != PAGE_SIZE)
1553 return -EINVAL;
1554
1555 if (!adev->mman.sdma_access_ptr)
1556 return -EACCES;
1557
1558 if (!drm_dev_enter(adev_to_drm(adev), &idx))
1559 return -ENODEV;
1560
1561 if (write)
1562 memcpy(adev->mman.sdma_access_ptr, buf, len);
1563
1564 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
1565 r = amdgpu_job_alloc_with_ib(adev, &adev->mman.default_entity.base,
1566 AMDGPU_FENCE_OWNER_UNDEFINED,
1567 num_dw * 4, AMDGPU_IB_POOL_DELAYED,
1568 &job,
1569 AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
1570 if (r)
1571 goto out;
1572
1573 mutex_lock(&adev->mman.gtt_window_lock);
1574 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
1575 src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
1576 src_mm.start;
1577 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
1578 if (write)
1579 swap(src_addr, dst_addr);
1580
1581 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
1582 PAGE_SIZE, 0);
1583
1584 fence = amdgpu_ttm_job_submit(adev, job, num_dw);
1585 mutex_unlock(&adev->mman.gtt_window_lock);
1586
1587 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
1588 r = -ETIMEDOUT;
1589 dma_fence_put(fence);
1590
1591 if (!(r || write))
1592 memcpy(buf, adev->mman.sdma_access_ptr, len);
1593 out:
1594 drm_dev_exit(idx);
1595 return r;
1596 }
1597
1598 /**
1599 * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
1600 *
1601 * @bo: The buffer object to read/write
1602 * @offset: Offset into buffer object
1603 * @buf: Secondary buffer to write/read from
1604 * @len: Length in bytes of access
1605 * @write: true if writing
1606 *
1607 * This is used to access VRAM that backs a buffer object via MMIO
1608 * access for debugging purposes.
1609 */
1610 static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
1611 unsigned long offset, void *buf, int len,
1612 int write)
1613 {
1614 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
1615 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
1616 struct amdgpu_res_cursor cursor;
1617 int ret = 0;
1618
1619 if (bo->resource->mem_type != TTM_PL_VRAM)
1620 return -EIO;
1621
1622 if (amdgpu_device_has_timeouts_enabled(adev) &&
1623 !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
1624 return len;
1625
1626 amdgpu_res_first(bo->resource, offset, len, &cursor);
1627 while (cursor.remaining) {
1628 size_t count, size = cursor.size;
1629 loff_t pos = cursor.start;
1630
1631 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
1632 size -= count;
1633 if (size) {
1634 /* use MM access for the rest of VRAM and to handle unaligned addresses */
1635 pos += count;
1636 buf += count;
1637 amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
1638 }
1639
1640 ret += cursor.size;
1641 buf += cursor.size;
1642 amdgpu_res_next(&cursor, cursor.size);
1643 }
1644
1645 return ret;
1646 }
1647
1648 static void
1649 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
1650 {
1651 amdgpu_bo_move_notify(bo, false, NULL);
1652 }
1653
1654 static struct ttm_device_funcs amdgpu_bo_driver = {
1655 .ttm_tt_create = &amdgpu_ttm_tt_create,
1656 .ttm_tt_populate = &amdgpu_ttm_tt_populate,
1657 .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
1658 .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
1659 .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
1660 .evict_flags = &amdgpu_evict_flags,
1661 .move = &amdgpu_bo_move,
1662 .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
1663 .release_notify = &amdgpu_bo_release_notify,
1664 .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
1665 .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
1666 .access_memory = &amdgpu_ttm_access_memory,
1667 };
1668
1669 /*
1670 * Firmware Reservation functions
1671 */
1672 /**
1673 * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
1674 *
1675 * @adev: amdgpu_device pointer
1676 *
1677 * free fw reserved vram if it has been reserved.
1678 */
1679 static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
1680 {
1681 amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
1682 NULL, &adev->mman.fw_vram_usage_va);
1683 }
1684
1685 /*
1686 * Driver Reservation functions
1687 */
1688 /**
1689 * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
1690 *
1691 * @adev: amdgpu_device pointer
1692 *
1693 * free drv reserved vram if it has been reserved.
1694 */
1695 static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
1696 {
1697 amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
1698 NULL,
1699 &adev->mman.drv_vram_usage_va);
1700 }
1701
1702 /**
1703 * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
1704 *
1705 * @adev: amdgpu_device pointer
1706 *
1707 * create bo vram reservation from fw.
1708 */
1709 static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
1710 {
1711 uint64_t vram_size = adev->gmc.visible_vram_size;
1712
1713 adev->mman.fw_vram_usage_va = NULL;
1714 adev->mman.fw_vram_usage_reserved_bo = NULL;
1715
1716 if (adev->mman.fw_vram_usage_size == 0 ||
1717 adev->mman.fw_vram_usage_size > vram_size)
1718 return 0;
1719
1720 return amdgpu_bo_create_kernel_at(adev,
1721 adev->mman.fw_vram_usage_start_offset,
1722 adev->mman.fw_vram_usage_size,
1723 &adev->mman.fw_vram_usage_reserved_bo,
1724 &adev->mman.fw_vram_usage_va);
1725 }
1726
1727 /**
1728 * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
1729 *
1730 * @adev: amdgpu_device pointer
1731 *
1732 * create bo vram reservation from drv.
1733 */
1734 static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
1735 {
1736 u64 vram_size = adev->gmc.visible_vram_size;
1737
1738 adev->mman.drv_vram_usage_va = NULL;
1739 adev->mman.drv_vram_usage_reserved_bo = NULL;
1740
1741 if (adev->mman.drv_vram_usage_size == 0 ||
1742 adev->mman.drv_vram_usage_size > vram_size)
1743 return 0;
1744
1745 return amdgpu_bo_create_kernel_at(adev,
1746 adev->mman.drv_vram_usage_start_offset,
1747 adev->mman.drv_vram_usage_size,
1748 &adev->mman.drv_vram_usage_reserved_bo,
1749 &adev->mman.drv_vram_usage_va);
1750 }
1751
1752 /*
1753 * Memory training reservation functions
1754 */
1755
1756 /**
1757 * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
1758 *
1759 * @adev: amdgpu_device pointer
1760 *
1761 * free memory training reserved vram if it has been reserved.
1762 */
1763 static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
1764 {
1765 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1766
1767 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
1768 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
1769 ctx->c2p_bo = NULL;
1770
1771 return 0;
1772 }
1773
1774 static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
1775 uint32_t reserve_size)
1776 {
1777 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1778
1779 memset(ctx, 0, sizeof(*ctx));
1780
1781 ctx->c2p_train_data_offset =
1782 ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
1783 ctx->p2c_train_data_offset =
1784 (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
1785 ctx->train_data_size =
1786 GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
1787
1788 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
1789 ctx->train_data_size,
1790 ctx->p2c_train_data_offset,
1791 ctx->c2p_train_data_offset);
1792 }
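/*
 * Worked example of the layout computed above (hypothetical numbers, for
 * illustration only): with mc_vram_size = 8 GiB and reserve_size = 4 MiB,
 * the C2P offset becomes ALIGN(8 GiB - 4 MiB - 1 MiB, 1 MiB) = 8 GiB - 5 MiB,
 * i.e. the training buffer sits just below the reserved region at the top of
 * VRAM, while the P2C offset is fixed at
 * mc_vram_size - GDDR6_MEM_TRAINING_OFFSET.
 */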
1793
1794 /*
1795 * reserve TMR memory at the top of VRAM which holds
1796 * IP Discovery data and is protected by PSP.
1797 */
1798 static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
1799 {
1800 struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
1801 bool mem_train_support = false;
1802 uint32_t reserve_size = 0;
1803 int ret;
1804
1805 if (adev->bios && !amdgpu_sriov_vf(adev)) {
1806 if (amdgpu_atomfirmware_mem_training_supported(adev))
1807 mem_train_support = true;
1808 else
1809 DRM_DEBUG("memory training is not supported!\n");
1810 }
1811
1812 /*
1813 * Query the reserved TMR size through atom firmware info for Sienna_Cichlid
1814 * and onwards for all the use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
1815 *
1816 * Otherwise, fall back to the legacy approach of checking and reserving a TMR block for IP
1817 * discovery data and G6 memory training data respectively.
1818 */
1819 if (adev->bios)
1820 reserve_size =
1821 amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
1822
1823 if (!adev->bios &&
1824 (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
1825 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
1826 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
1827 reserve_size = max(reserve_size, (uint32_t)280 << 20);
1828 else if (!adev->bios &&
1829 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
1830 if (hweight32(adev->aid_mask) == 1)
1831 reserve_size = max(reserve_size, (uint32_t)128 << 20);
1832 else
1833 reserve_size = max(reserve_size, (uint32_t)144 << 20);
1834 } else if (!reserve_size)
1835 reserve_size = DISCOVERY_TMR_OFFSET;
1836
1837 if (mem_train_support) {
1838 /* reserve vram for mem train according to TMR location */
1839 amdgpu_ttm_training_data_block_init(adev, reserve_size);
1840 ret = amdgpu_bo_create_kernel_at(adev,
1841 ctx->c2p_train_data_offset,
1842 ctx->train_data_size,
1843 &ctx->c2p_bo,
1844 NULL);
1845 if (ret) {
1846 dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
1847 amdgpu_ttm_training_reserve_vram_fini(adev);
1848 return ret;
1849 }
1850 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
1851 }
1852
1853 ret = amdgpu_bo_create_kernel_at(
1854 adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
1855 &adev->mman.fw_reserved_memory, NULL);
1856 if (ret) {
1857 dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
1858 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
1859 NULL);
1860 return ret;
1861 }
1862
1863 return 0;
1864 }
1865
1866 static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
1867 {
1868 int i;
1869
1870 if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
1871 return 0;
1872
1873 adev->mman.ttm_pools = kzalloc_objs(*adev->mman.ttm_pools,
1874 adev->gmc.num_mem_partitions);
1875 if (!adev->mman.ttm_pools)
1876 return -ENOMEM;
1877
1878 for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
1879 ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
1880 adev->gmc.mem_partitions[i].numa.node,
1881 TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
1882 }
1883 return 0;
1884 }
1885
1886 static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
1887 {
1888 int i;
1889
1890 if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
1891 return;
1892
1893 for (i = 0; i < adev->gmc.num_mem_partitions; i++)
1894 ttm_pool_fini(&adev->mman.ttm_pools[i]);
1895
1896 kfree(adev->mman.ttm_pools);
1897 adev->mman.ttm_pools = NULL;
1898 }
1899
1900 /**
1901 * amdgpu_ttm_alloc_mmio_remap_bo - Allocate the singleton MMIO_REMAP BO
1902 * @adev: amdgpu device
1903 *
1904 * Allocates a global BO with backing AMDGPU_PL_MMIO_REMAP when the
1905 * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
1906 * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
1907 * GEM object (amdgpu_bo_create).
1908 *
1909 * Return:
1910 * * 0 on success or intentional skip (feature not present/unsupported)
1911 * * negative errno on allocation failure
1912 */
1913 static int amdgpu_ttm_alloc_mmio_remap_bo(struct amdgpu_device *adev)
1914 {
1915 struct ttm_operation_ctx ctx = { false, false };
1916 struct ttm_placement placement;
1917 struct ttm_buffer_object *tbo;
1918 struct ttm_place placements;
1919 struct amdgpu_bo_param bp;
1920 struct ttm_resource *tmp;
1921 int r;
1922
1923 /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
1924 if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
1925 return 0;
1926
1927 /*
1928 * Allocate a BO first and then move it to AMDGPU_PL_MMIO_REMAP.
1929 * The initial TTM resource assigned by amdgpu_bo_create() is
1930 * replaced below with a fixed MMIO_REMAP placement.
1931 */
1932 memset(&bp, 0, sizeof(bp));
1933 bp.type = ttm_bo_type_device;
1934 bp.size = AMDGPU_GPU_PAGE_SIZE;
1935 bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
1936 bp.domain = 0;
1937 bp.flags = 0;
1938 bp.resv = NULL;
1939 bp.bo_ptr_size = sizeof(struct amdgpu_bo);
1940 r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
1941 if (r)
1942 return r;
1943
1944 r = amdgpu_bo_reserve(adev->rmmio_remap.bo, true);
1945 if (r)
1946 goto err_unref;
1947
1948 tbo = &adev->rmmio_remap.bo->tbo;
1949
1950 /*
1951 * MMIO_REMAP is a fixed I/O placement (AMDGPU_PL_MMIO_REMAP).
1952 */
1953 placement.num_placement = 1;
1954 placement.placement = &placements;
1955 placements.fpfn = 0;
1956 placements.lpfn = 0;
1957 placements.mem_type = AMDGPU_PL_MMIO_REMAP;
1958 placements.flags = 0;
1959 /* Force the BO into the fixed MMIO_REMAP placement */
1960 r = ttm_bo_mem_space(tbo, &placement, &tmp, &ctx);
1961 if (unlikely(r))
1962 goto err_unlock;
1963
1964 ttm_resource_free(tbo, &tbo->resource);
1965 ttm_bo_assign_mem(tbo, tmp);
1966 ttm_bo_pin(tbo);
1967
1968 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1969 return 0;
1970
1971 err_unlock:
1972 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1973
1974 err_unref:
1975 amdgpu_bo_unref(&adev->rmmio_remap.bo);
1976 adev->rmmio_remap.bo = NULL;
1977 return r;
1978 }
1979
1980 /**
1981 * amdgpu_ttm_free_mmio_remap_bo - Free the singleton MMIO_REMAP BO
1982 * @adev: amdgpu device
1983 *
1984 * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
1985 * amdgpu_ttm_alloc_mmio_remap_bo().
1986 */
1987 static void amdgpu_ttm_free_mmio_remap_bo(struct amdgpu_device *adev)
1988 {
1989 if (!adev->rmmio_remap.bo)
1990 return;
1991
1992 if (!amdgpu_bo_reserve(adev->rmmio_remap.bo, true)) {
1993 ttm_bo_unpin(&adev->rmmio_remap.bo->tbo);
1994 amdgpu_bo_unreserve(adev->rmmio_remap.bo);
1995 }
1996
1997 /*
1998 * At this point we rely on normal DRM teardown ordering:
1999 * no new user ioctls can access the global MMIO_REMAP BO
2000 * once TTM teardown begins.
2001 */
2002 amdgpu_bo_unref(&adev->rmmio_remap.bo);
2003 adev->rmmio_remap.bo = NULL;
2004 }
2005
2006 /*
2007 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
2008 * gtt/vram related fields.
2009 *
2010 * This initializes all of the memory space pools that the TTM layer
2011 * will need such as the GTT space (system memory mapped to the device),
2012 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
2013 * can be mapped per VMID.
2014 */
2015 int amdgpu_ttm_init(struct amdgpu_device *adev)
2016 {
2017 uint64_t gtt_size;
2018 int r;
2019
2020 mutex_init(&adev->mman.gtt_window_lock);
2021
2022 dma_set_max_seg_size(adev->dev, UINT_MAX);
2023 /* No other users of the address space, so set it to 0 */
2024 r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
2025 adev_to_drm(adev)->anon_inode->i_mapping,
2026 adev_to_drm(adev)->vma_offset_manager,
2027 (adev->need_swiotlb ?
2028 TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
2029 (dma_addressing_limited(adev->dev) ?
2030 TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
2031 TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
2032 if (r) {
2033 dev_err(adev->dev,
2034 "failed initializing buffer object driver(%d).\n", r);
2035 return r;
2036 }
2037
2038 r = amdgpu_ttm_pools_init(adev);
2039 if (r) {
2040 dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
2041 return r;
2042 }
2043 adev->mman.initialized = true;
2044
2045 if (!adev->gmc.is_app_apu) {
2046 /* Initialize VRAM pool with all of VRAM divided into pages */
2047 r = amdgpu_vram_mgr_init(adev);
2048 if (r) {
2049 dev_err(adev->dev, "Failed initializing VRAM heap.\n");
2050 return r;
2051 }
2052 }
2053
2054 /* Change the size here instead of the init above so only lpfn is affected */
2055 amdgpu_ttm_set_buffer_funcs_status(adev, false);
2056 #ifdef CONFIG_64BIT
2057 #ifdef CONFIG_X86
2058 if (adev->gmc.xgmi.connected_to_cpu)
2059 adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
2060 adev->gmc.visible_vram_size);
2061
2062 else if (adev->gmc.is_app_apu)
2063 DRM_DEBUG_DRIVER(
2064 "No need to ioremap when real vram size is 0\n");
2065 else
2066 #endif
2067 adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
2068 adev->gmc.visible_vram_size);
2069 #endif
2070
2071 /*
2072 * The reserved VRAM for firmware must be pinned to the specified
2073 * place in VRAM, so reserve it early.
2074 */
2075 r = amdgpu_ttm_fw_reserve_vram_init(adev);
2076 if (r)
2077 return r;
2078
2079 /*
2080 * The reserved VRAM for the driver must be pinned to a specific
2081 * location in VRAM, so reserve it early.
2082 */
2083 r = amdgpu_ttm_drv_reserve_vram_init(adev);
2084 if (r)
2085 return r;
2086
2087 /*
2088 * only NAVI10 and later ASICs support IP discovery.
2089 * If IP discovery is enabled, a block of memory should be
2090 * reserved for it.
2091 */
2092 if (adev->discovery.reserve_tmr) {
2093 r = amdgpu_ttm_reserve_tmr(adev);
2094 if (r)
2095 return r;
2096 }
2097
2098 /* allocate memory as required for VGA
2099 * This is used for VGA emulation and pre-OS scanout buffers to
2100 * avoid display artifacts while transitioning between pre-OS
2101 * and driver.
2102 */
2103 if (!adev->gmc.is_app_apu) {
2104 r = amdgpu_bo_create_kernel_at(adev, 0,
2105 adev->mman.stolen_vga_size,
2106 &adev->mman.stolen_vga_memory,
2107 NULL);
2108 if (r)
2109 return r;
2110
2111 r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
2112 adev->mman.stolen_extended_size,
2113 &adev->mman.stolen_extended_memory,
2114 NULL);
2115
2116 if (r)
2117 return r;
2118
2119 r = amdgpu_bo_create_kernel_at(adev,
2120 adev->mman.stolen_reserved_offset,
2121 adev->mman.stolen_reserved_size,
2122 &adev->mman.stolen_reserved_memory,
2123 NULL);
2124 if (r)
2125 return r;
2126 } else {
2127 DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
2128 }
2129
2130 dev_info(adev->dev, " %uM of VRAM memory ready\n",
2131 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
2132
2133 /* Compute GTT size, either based on TTM limit
2134 * or whatever the user passed on module init.
2135 */
2136 gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
2137 if (amdgpu_gtt_size != -1) {
2138 uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
2139
2140 drm_warn(&adev->ddev,
2141 "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
2142 if (gtt_size != configured_size)
2143 drm_warn(&adev->ddev,
2144 "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
2145 configured_size, gtt_size);
2146
2147 gtt_size = configured_size;
2148 }
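	/*
	 * Illustrative arithmetic (hypothetical value): passing
	 * amdgpu_gtt_size=4096 on the module command line gives
	 * (uint64_t)4096 << 20 = 4 GiB here, which then replaces the
	 * ttm_tt_pages_limit() based default computed above.
	 */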
2149
2150 /* Initialize GTT memory pool */
2151 r = amdgpu_gtt_mgr_init(adev, gtt_size);
2152 if (r) {
2153 dev_err(adev->dev, "Failed initializing GTT heap.\n");
2154 return r;
2155 }
2156 dev_info(adev->dev, " %uM of GTT memory ready.\n",
2157 (unsigned int)(gtt_size / (1024 * 1024)));
2158
2159 if (adev->flags & AMD_IS_APU) {
2160 if (adev->gmc.real_vram_size < gtt_size)
2161 adev->apu_prefer_gtt = true;
2162 }
2163
2164 /* Initialize doorbell pool on PCI BAR */
2165 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
2166 if (r) {
2167 dev_err(adev->dev, "Failed initializing doorbell heap.\n");
2168 return r;
2169 }
2170
2171 /* Create a doorbell page for kernel use */
2172 r = amdgpu_doorbell_create_kernel_doorbells(adev);
2173 if (r) {
2174 dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
2175 return r;
2176 }
2177
2178 /* Initialize MMIO-remap pool (single page 4K) */
2179 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
2180 if (r) {
2181 dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
2182 return r;
2183 }
2184
2185 /* Allocate the singleton MMIO_REMAP BO if supported */
2186 r = amdgpu_ttm_alloc_mmio_remap_bo(adev);
2187 if (r)
2188 return r;
2189
2190 /* Initialize preemptible memory pool */
2191 r = amdgpu_preempt_mgr_init(adev);
2192 if (r) {
2193 dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
2194 return r;
2195 }
2196
2197 /* Initialize various on-chip memory pools */
2198 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
2199 if (r) {
2200 dev_err(adev->dev, "Failed initializing GDS heap.\n");
2201 return r;
2202 }
2203
2204 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
2205 if (r) {
2206 dev_err(adev->dev, "Failed initializing gws heap.\n");
2207 return r;
2208 }
2209
2210 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
2211 if (r) {
2212 dev_err(adev->dev, "Failed initializing oa heap.\n");
2213 return r;
2214 }
2215 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
2216 AMDGPU_GEM_DOMAIN_GTT,
2217 &adev->mman.sdma_access_bo, NULL,
2218 &adev->mman.sdma_access_ptr))
2219 drm_warn(adev_to_drm(adev),
2220 "Debug VRAM access will use slowpath MM access\n");
2221
2222 return 0;
2223 }
2224
2225 /*
2226 * amdgpu_ttm_fini - De-initialize the TTM memory pools
2227 */
2228 void amdgpu_ttm_fini(struct amdgpu_device *adev)
2229 {
2230 int idx;
2231
2232 if (!adev->mman.initialized)
2233 return;
2234
2235 amdgpu_ttm_pools_fini(adev);
2236
2237 amdgpu_ttm_training_reserve_vram_fini(adev);
2238 /* return the stolen vga memory back to VRAM */
2239 if (!adev->gmc.is_app_apu) {
2240 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
2241 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
2242 /* return the FW reserved memory back to VRAM */
2243 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
2244 NULL);
2245 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
2246 NULL);
2247 if (adev->mman.stolen_reserved_size)
2248 amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
2249 NULL, NULL);
2250 }
2251 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
2252 &adev->mman.sdma_access_ptr);
2253
2254 amdgpu_ttm_free_mmio_remap_bo(adev);
2255 amdgpu_ttm_fw_reserve_vram_fini(adev);
2256 amdgpu_ttm_drv_reserve_vram_fini(adev);
2257
2258 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
2259
2260 if (adev->mman.aper_base_kaddr)
2261 iounmap(adev->mman.aper_base_kaddr);
2262 adev->mman.aper_base_kaddr = NULL;
2263
2264 drm_dev_exit(idx);
2265 }
2266
2267 if (!adev->gmc.is_app_apu)
2268 amdgpu_vram_mgr_fini(adev);
2269 amdgpu_gtt_mgr_fini(adev);
2270 amdgpu_preempt_mgr_fini(adev);
2271 amdgpu_doorbell_fini(adev);
2272
2273 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
2274 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
2275 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
2276 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
2277 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
2278 ttm_device_fini(&adev->mman.bdev);
2279 adev->mman.initialized = false;
2280 dev_info(adev->dev, " ttm finalized\n");
2281 }
2282
2283 /**
2284 * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
2285 *
2286 * @adev: amdgpu_device pointer
2287 * @enable: true when we can use buffer functions.
2288 *
2289 * Enable/disable use of buffer functions during suspend/resume. This should
2290 * only be called at bootup or when userspace isn't running.
2291 */
2292 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
2293 {
2294 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
2295 uint64_t size;
2296 int r;
2297
2298 if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
2299 adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
2300 return;
2301
2302 if (enable) {
2303 struct amdgpu_ring *ring;
2304 struct drm_gpu_scheduler *sched;
2305
2306 ring = adev->mman.buffer_funcs_ring;
2307 sched = &ring->sched;
2308 r = drm_sched_entity_init(&adev->mman.default_entity.base,
2309 DRM_SCHED_PRIORITY_KERNEL, &sched,
2310 1, NULL);
2311 if (r) {
2312 dev_err(adev->dev,
2313 "Failed setting up TTM BO move entity (%d)\n",
2314 r);
2315 return;
2316 }
2317
2318 r = drm_sched_entity_init(&adev->mman.clear_entity.base,
2319 DRM_SCHED_PRIORITY_NORMAL, &sched,
2320 1, NULL);
2321 if (r) {
2322 dev_err(adev->dev,
2323 "Failed setting up TTM BO clear entity (%d)\n",
2324 r);
2325 goto error_free_entity;
2326 }
2327
2328 r = drm_sched_entity_init(&adev->mman.move_entity.base,
2329 DRM_SCHED_PRIORITY_NORMAL, &sched,
2330 1, NULL);
2331 if (r) {
2332 dev_err(adev->dev,
2333 "Failed setting up TTM BO move entity (%d)\n",
2334 r);
2335 drm_sched_entity_destroy(&adev->mman.clear_entity.base);
2336 goto error_free_entity;
2337 }
2338 } else {
2339 drm_sched_entity_destroy(&adev->mman.default_entity.base);
2340 drm_sched_entity_destroy(&adev->mman.clear_entity.base);
2341 drm_sched_entity_destroy(&adev->mman.move_entity.base);
2342 /* Drop all the old fences since re-creating the scheduler entities
2343 * will allocate new contexts.
2344 */
2345 ttm_resource_manager_cleanup(man);
2346 }
2347
2348 /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
2349 if (enable)
2350 size = adev->gmc.real_vram_size;
2351 else
2352 size = adev->gmc.visible_vram_size;
2353 man->size = size;
2354 adev->mman.buffer_funcs_enabled = enable;
2355
2356 return;
2357
2358 error_free_entity:
2359 drm_sched_entity_destroy(&adev->mman.default_entity.base);
2360 }
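/*
 * Illustrative call pattern (sketch only, not taken from this file): an SDMA
 * IP block would point buffer_funcs_ring at its ring and enable the buffer
 * functions once the ring is ready, then disable them again on teardown:
 *
 *	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 *	amdgpu_ttm_set_buffer_funcs_status(adev, true);
 *	...
 *	amdgpu_ttm_set_buffer_funcs_status(adev, false);
 */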
2361
2362 static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
2363 struct amdgpu_ttm_buffer_entity *entity,
2364 unsigned int num_dw,
2365 struct dma_resv *resv,
2366 bool vm_needs_flush,
2367 struct amdgpu_job **job,
2368 u64 k_job_id)
2369 {
2370 enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
2371 int r;
2372 r = amdgpu_job_alloc_with_ib(adev, &entity->base,
2373 AMDGPU_FENCE_OWNER_UNDEFINED,
2374 num_dw * 4, pool, job, k_job_id);
2375 if (r)
2376 return r;
2377
2378 if (vm_needs_flush) {
2379 (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
2380 adev->gmc.pdb0_bo :
2381 adev->gart.bo);
2382 (*job)->vm_needs_flush = true;
2383 }
2384 if (!resv)
2385 return 0;
2386
2387 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2388 DMA_RESV_USAGE_BOOKKEEP);
2389 }
2390
2391 int amdgpu_copy_buffer(struct amdgpu_device *adev,
2392 struct amdgpu_ttm_buffer_entity *entity,
2393 uint64_t src_offset,
2394 uint64_t dst_offset, uint32_t byte_count,
2395 struct dma_resv *resv,
2396 struct dma_fence **fence,
2397 bool vm_needs_flush, uint32_t copy_flags)
2398 {
2399 unsigned int num_loops, num_dw;
2400 struct amdgpu_ring *ring;
2401 struct amdgpu_job *job;
2402 uint32_t max_bytes;
2403 unsigned int i;
2404 int r;
2405
2406 ring = adev->mman.buffer_funcs_ring;
2407
2408 if (!ring->sched.ready) {
2409 dev_err(adev->dev,
2410 "Trying to move memory with ring turned off.\n");
2411 return -EINVAL;
2412 }
2413
2414 max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
2415 num_loops = DIV_ROUND_UP(byte_count, max_bytes);
2416 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
2417 r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
2418 resv, vm_needs_flush, &job,
2419 AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
2420 if (r)
2421 goto error_free;
2422
2423 for (i = 0; i < num_loops; i++) {
2424 uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
2425
2426 amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
2427 dst_offset, cur_size_in_bytes, copy_flags);
2428 src_offset += cur_size_in_bytes;
2429 dst_offset += cur_size_in_bytes;
2430 byte_count -= cur_size_in_bytes;
2431 }
2432
2433 *fence = amdgpu_ttm_job_submit(adev, job, num_dw);
2434
2435 return 0;
2436
2437 error_free:
2438 amdgpu_job_free(job);
2439 dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
2440 return r;
2441 }
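/*
 * Example call site (illustrative sketch; src_gpu_addr, dst_gpu_addr and the
 * byte count are hypothetical): copy 8 MiB between two GPU addresses using
 * the kernel move entity and wait for the resulting fence:
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev, &adev->mman.move_entity,
 *			       src_gpu_addr, dst_gpu_addr, 8 << 20,
 *			       NULL, &fence, false, 0);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */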
2442
2443 static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
2444 struct amdgpu_ttm_buffer_entity *entity,
2445 uint32_t src_data,
2446 uint64_t dst_addr, uint32_t byte_count,
2447 struct dma_resv *resv,
2448 struct dma_fence **fence,
2449 bool vm_needs_flush,
2450 u64 k_job_id)
2451 {
2452 unsigned int num_loops, num_dw;
2453 struct amdgpu_job *job;
2454 uint32_t max_bytes;
2455 unsigned int i;
2456 int r;
2457
2458 max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
2459 num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
2460 num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
2461 r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
2462 vm_needs_flush, &job, k_job_id);
2463 if (r)
2464 return r;
2465
2466 for (i = 0; i < num_loops; i++) {
2467 uint32_t cur_size = min(byte_count, max_bytes);
2468
2469 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
2470 cur_size);
2471
2472 dst_addr += cur_size;
2473 byte_count -= cur_size;
2474 }
2475
2476 *fence = amdgpu_ttm_job_submit(adev, job, num_dw);
2477 return 0;
2478 }
2479
2480 /**
2481 * amdgpu_ttm_clear_buffer - clear memory buffers
2482 * @bo: amdgpu buffer object
2483 * @resv: reservation object
2484 * @fence: dma_fence associated with the operation
2485 *
2486 * Clear the memory buffer resource.
2487 *
2488 * Returns:
2489 * 0 for success or a negative error code on failure.
2490 */
2491 int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
2492 struct dma_resv *resv,
2493 struct dma_fence **fence)
2494 {
2495 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2496 struct amdgpu_res_cursor cursor;
2497 u64 addr;
2498 int r = 0;
2499
2500 if (!adev->mman.buffer_funcs_enabled)
2501 return -EINVAL;
2502
2503 if (!fence)
2504 return -EINVAL;
2505
2506 *fence = dma_fence_get_stub();
2507
2508 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
2509
2510 mutex_lock(&adev->mman.gtt_window_lock);
2511 while (cursor.remaining) {
2512 struct dma_fence *next = NULL;
2513 u64 size;
2514
2515 if (amdgpu_res_cleared(&cursor)) {
2516 amdgpu_res_next(&cursor, cursor.size);
2517 continue;
2518 }
2519
2520 /* Never clear more than 256MiB at once to avoid timeouts */
2521 size = min(cursor.size, 256ULL << 20);
2522
2523 r = amdgpu_ttm_map_buffer(&adev->mman.clear_entity,
2524 &bo->tbo, bo->tbo.resource, &cursor,
2525 1, false, &size, &addr);
2526 if (r)
2527 goto err;
2528
2529 r = amdgpu_ttm_fill_mem(adev, &adev->mman.clear_entity, 0, addr, size, resv,
2530 &next, true,
2531 AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
2532 if (r)
2533 goto err;
2534
2535 dma_fence_put(*fence);
2536 *fence = next;
2537
2538 amdgpu_res_next(&cursor, size);
2539 }
2540 err:
2541 mutex_unlock(&adev->mman.gtt_window_lock);
2542
2543 return r;
2544 }
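/*
 * Example call site (illustrative sketch, assuming @abo is an already
 * reserved VRAM BO whose reservation object should gate the clear):
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_ttm_clear_buffer(abo, abo->tbo.base.resv, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */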
2545
2546 int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
2547 struct amdgpu_bo *bo,
2548 uint32_t src_data,
2549 struct dma_resv *resv,
2550 struct dma_fence **f,
2551 u64 k_job_id)
2552 {
2553 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
2554 struct dma_fence *fence = NULL;
2555 struct amdgpu_res_cursor dst;
2556 int r;
2557
2558 if (!adev->mman.buffer_funcs_enabled) {
2559 dev_err(adev->dev,
2560 "Trying to clear memory with ring turned off.\n");
2561 return -EINVAL;
2562 }
2563
2564 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
2565
2566 mutex_lock(&adev->mman.gtt_window_lock);
2567 while (dst.remaining) {
2568 struct dma_fence *next;
2569 uint64_t cur_size, to;
2570
2571 /* Never fill more than 256MiB at once to avoid timeouts */
2572 cur_size = min(dst.size, 256ULL << 20);
2573
2574 r = amdgpu_ttm_map_buffer(entity, &bo->tbo, bo->tbo.resource, &dst,
2575 1, false, &cur_size, &to);
2576 if (r)
2577 goto error;
2578
2579 r = amdgpu_ttm_fill_mem(adev, entity,
2580 src_data, to, cur_size, resv,
2581 &next, true, k_job_id);
2582 if (r)
2583 goto error;
2584
2585 dma_fence_put(fence);
2586 fence = next;
2587
2588 amdgpu_res_next(&dst, cur_size);
2589 }
2590 error:
2591 mutex_unlock(&adev->mman.gtt_window_lock);
2592 if (f)
2593 *f = dma_fence_get(fence);
2594 dma_fence_put(fence);
2595 return r;
2596 }
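/*
 * Example call site (illustrative sketch, assuming @abo is an already
 * reserved VRAM BO): fill the whole buffer with zeros using the kernel clear
 * entity and wait for completion; the job id reuses the clear-buffer id from
 * above purely for illustration:
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 *	r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0,
 *			       abo->tbo.base.resv, &fence,
 *			       AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */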
2597
2598 /**
2599 * amdgpu_ttm_evict_resources - evict memory buffers
2600 * @adev: amdgpu device object
2601 * @mem_type: evicted BO's memory type
2602 *
2603 * Evicts all @mem_type buffers on the lru list of the memory type.
2604 *
2605 * Returns:
2606 * 0 for success or a negative error code on failure.
2607 */
2608 int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
2609 {
2610 struct ttm_resource_manager *man;
2611
2612 switch (mem_type) {
2613 case TTM_PL_VRAM:
2614 case TTM_PL_TT:
2615 case AMDGPU_PL_GWS:
2616 case AMDGPU_PL_GDS:
2617 case AMDGPU_PL_OA:
2618 man = ttm_manager_type(&adev->mman.bdev, mem_type);
2619 break;
2620 default:
2621 dev_err(adev->dev, "Trying to evict invalid memory type\n");
2622 return -EINVAL;
2623 }
2624
2625 return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
2626 }
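/*
 * Example (illustrative sketch): evict all VRAM buffers, e.g. as part of a
 * suspend path, and report failure without treating it as fatal:
 *
 *	int r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
 *	if (r)
 *		dev_warn(adev->dev, "evicting VRAM failed (%d)\n", r);
 */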
2627
2628 #if defined(CONFIG_DEBUG_FS)
2629
2630 static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
2631 {
2632 struct amdgpu_device *adev = m->private;
2633
2634 return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
2635 }
2636
2637 DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
2638
2639 /*
2640 * amdgpu_ttm_vram_read - Linear read access to VRAM
2641 *
2642 * Accesses VRAM via MMIO for debugging purposes.
2643 */
2644 static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
2645 size_t size, loff_t *pos)
2646 {
2647 struct amdgpu_device *adev = file_inode(f)->i_private;
2648 ssize_t result = 0;
2649
2650 if (size & 0x3 || *pos & 0x3)
2651 return -EINVAL;
2652
2653 if (*pos >= adev->gmc.mc_vram_size)
2654 return -ENXIO;
2655
2656 size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
2657 while (size) {
2658 size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
2659 uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
2660
2661 amdgpu_device_vram_access(adev, *pos, value, bytes, false);
2662 if (copy_to_user(buf, value, bytes))
2663 return -EFAULT;
2664
2665 result += bytes;
2666 buf += bytes;
2667 *pos += bytes;
2668 size -= bytes;
2669 }
2670
2671 return result;
2672 }
2673
2674 /*
2675 * amdgpu_ttm_vram_write - Linear write access to VRAM
2676 *
2677 * Accesses VRAM via MMIO for debugging purposes.
2678 */
2679 static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
2680 size_t size, loff_t *pos)
2681 {
2682 struct amdgpu_device *adev = file_inode(f)->i_private;
2683 ssize_t result = 0;
2684 int r;
2685
2686 if (size & 0x3 || *pos & 0x3)
2687 return -EINVAL;
2688
2689 if (*pos >= adev->gmc.mc_vram_size)
2690 return -ENXIO;
2691
2692 while (size) {
2693 uint32_t value;
2694
2695 if (*pos >= adev->gmc.mc_vram_size)
2696 return result;
2697
2698 r = get_user(value, (uint32_t *)buf);
2699 if (r)
2700 return r;
2701
2702 amdgpu_device_mm_access(adev, *pos, &value, 4, true);
2703
2704 result += 4;
2705 buf += 4;
2706 *pos += 4;
2707 size -= 4;
2708 }
2709
2710 return result;
2711 }
2712
2713 static const struct file_operations amdgpu_ttm_vram_fops = {
2714 .owner = THIS_MODULE,
2715 .read = amdgpu_ttm_vram_read,
2716 .write = amdgpu_ttm_vram_write,
2717 .llseek = default_llseek,
2718 };
2719
2720 /*
2721 * amdgpu_iomem_read - Virtual read access to GPU mapped memory
2722 *
2723 * This function is used to read memory that has been mapped to the
2724 * GPU and the known addresses are not physical addresses but instead
2725 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2726 */
2727 static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
2728 size_t size, loff_t *pos)
2729 {
2730 struct amdgpu_device *adev = file_inode(f)->i_private;
2731 struct iommu_domain *dom;
2732 ssize_t result = 0;
2733 int r;
2734
2735 /* retrieve the IOMMU domain if any for this device */
2736 dom = iommu_get_domain_for_dev(adev->dev);
2737
2738 while (size) {
2739 phys_addr_t addr = *pos & PAGE_MASK;
2740 loff_t off = *pos & ~PAGE_MASK;
2741 size_t bytes = PAGE_SIZE - off;
2742 unsigned long pfn;
2743 struct page *p;
2744 void *ptr;
2745
2746 bytes = min(bytes, size);
2747
2748 /* Translate the bus address to a physical address. If
2749 * the domain is NULL it means there is no IOMMU active
2750 * and the address translation is the identity
2751 */
2752 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2753
2754 pfn = addr >> PAGE_SHIFT;
2755 if (!pfn_valid(pfn))
2756 return -EPERM;
2757
2758 p = pfn_to_page(pfn);
2759 if (p->mapping != adev->mman.bdev.dev_mapping)
2760 return -EPERM;
2761
2762 ptr = kmap_local_page(p);
2763 r = copy_to_user(buf, ptr + off, bytes);
2764 kunmap_local(ptr);
2765 if (r)
2766 return -EFAULT;
2767
2768 size -= bytes;
2769 *pos += bytes;
2770 result += bytes;
2771 }
2772
2773 return result;
2774 }
2775
2776 /*
2777 * amdgpu_iomem_write - Virtual write access to GPU mapped memory
2778 *
2779 * This function is used to write memory that has been mapped to the
2780 * GPU and the known addresses are not physical addresses but instead
2781 * bus addresses (e.g., what you'd put in an IB or ring buffer).
2782 */
2783 static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
2784 size_t size, loff_t *pos)
2785 {
2786 struct amdgpu_device *adev = file_inode(f)->i_private;
2787 struct iommu_domain *dom;
2788 ssize_t result = 0;
2789 int r;
2790
2791 dom = iommu_get_domain_for_dev(adev->dev);
2792
2793 while (size) {
2794 phys_addr_t addr = *pos & PAGE_MASK;
2795 loff_t off = *pos & ~PAGE_MASK;
2796 size_t bytes = PAGE_SIZE - off;
2797 unsigned long pfn;
2798 struct page *p;
2799 void *ptr;
2800
2801 bytes = min(bytes, size);
2802
2803 addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
2804
2805 pfn = addr >> PAGE_SHIFT;
2806 if (!pfn_valid(pfn))
2807 return -EPERM;
2808
2809 p = pfn_to_page(pfn);
2810 if (p->mapping != adev->mman.bdev.dev_mapping)
2811 return -EPERM;
2812
2813 ptr = kmap_local_page(p);
2814 r = copy_from_user(ptr + off, buf, bytes);
2815 kunmap_local(ptr);
2816 if (r)
2817 return -EFAULT;
2818
2819 size -= bytes;
2820 *pos += bytes;
2821 result += bytes;
2822 }
2823
2824 return result;
2825 }
2826
2827 static const struct file_operations amdgpu_ttm_iomem_fops = {
2828 .owner = THIS_MODULE,
2829 .read = amdgpu_iomem_read,
2830 .write = amdgpu_iomem_write,
2831 .llseek = default_llseek
2832 };
2833
2834 #endif
2835
2836 void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
2837 {
2838 #if defined(CONFIG_DEBUG_FS)
2839 struct drm_minor *minor = adev_to_drm(adev)->primary;
2840 struct dentry *root = minor->debugfs_root;
2841
2842 debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
2843 &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
2844 debugfs_create_file("amdgpu_iomem", 0444, root, adev,
2845 &amdgpu_ttm_iomem_fops);
2846 debugfs_create_file("ttm_page_pool", 0444, root, adev,
2847 &amdgpu_ttm_page_pool_fops);
2848 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2849 TTM_PL_VRAM),
2850 root, "amdgpu_vram_mm");
2851 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2852 TTM_PL_TT),
2853 root, "amdgpu_gtt_mm");
2854 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2855 AMDGPU_PL_GDS),
2856 root, "amdgpu_gds_mm");
2857 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2858 AMDGPU_PL_GWS),
2859 root, "amdgpu_gws_mm");
2860 ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
2861 AMDGPU_PL_OA),
2862 root, "amdgpu_oa_mm");
2863
2864 #endif
2865 }
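/*
 * Illustrative usage (sketch): with debugfs mounted, the files created above
 * typically appear under /sys/kernel/debug/dri/<minor>/. For example, dumping
 * the first bytes of VRAM on the primary node from the shell:
 *
 *	hexdump -C /sys/kernel/debug/dri/0/amdgpu_vram | head
 */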
2866