// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_sa.h"

#include <linux/kernel.h>

#include <drm/drm_managed.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_map.h"

static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
{
	struct xe_sa_manager *sa_manager = arg;
	struct xe_bo *bo = sa_manager->bo;

	if (!bo) {
		drm_err(drm, "no bo for sa manager\n");
		return;
	}

	drm_suballoc_manager_fini(&sa_manager->base);

	if (sa_manager->is_iomem)
		kvfree(sa_manager->cpu_ptr);

	sa_manager->bo = NULL;
}

/**
 * __xe_sa_bo_manager_init() - Create and initialize the suballocator
 * @tile: the &xe_tile where the suballocator BO is allocated
 * @size: number of bytes to allocate
 * @guard: number of bytes to exclude from suballocations
 * @align: alignment for each suballocated chunk
 *
 * Prepares the suballocation manager for suballocations.
 *
 * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
 */
struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_sa_manager *sa_manager;
	u32 managed_size;
	struct xe_bo *bo;
	int ret;

	xe_tile_assert(tile, size > guard);
	managed_size = size - guard;

	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
	if (!sa_manager)
		return ERR_PTR(-ENOMEM);

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE);
	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
			size / SZ_1K, bo);
		return ERR_CAST(bo);
	}
	sa_manager->bo = bo;
	sa_manager->is_iomem = bo->vmap.is_iomem;
	sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);

	if (bo->vmap.is_iomem) {
		sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
		if (!sa_manager->cpu_ptr)
			return ERR_PTR(-ENOMEM);
	} else {
		sa_manager->cpu_ptr = bo->vmap.vaddr;
		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
	}

	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
				       sa_manager);
	if (ret)
		return ERR_PTR(ret);

	return sa_manager;
}
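
/*
 * Illustrative sketch (not part of the upstream file): how a caller might
 * create a per-tile suballocator with __xe_sa_bo_manager_init(). The
 * function name and the 16 KiB size, zero guard and 8-byte alignment are
 * made-up values chosen for the example only.
 */
static __maybe_unused struct xe_sa_manager *
example_sa_manager_setup(struct xe_tile *tile)
{
	/* 16 KiB pool, no guard bytes, 8-byte aligned suballocations */
	return __xe_sa_bo_manager_init(tile, SZ_16K, 0, 8);
}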

/**
 * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
 * @sa_manager: the &xe_sa_manager
 * @size: number of bytes we want to suballocate
 * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
 *
 * Try to make a suballocation of size @size.
 *
 * Return: a &drm_suballoc, or an ERR_PTR.
 */
struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
{
	/*
	 * BB too large, return -ENOBUFS indicating the user should split
	 * the array of binds into smaller chunks.
	 */
	if (size > sa_manager->base.size)
		return ERR_PTR(-ENOBUFS);

	return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
}
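
/*
 * Illustrative sketch (not part of the upstream file): a typical
 * suballocation lifecycle. The function name, @payload and @fence are
 * made up for the example; @fence would normally come from the job that
 * consumes the suballocation.
 */
static __maybe_unused int example_sa_bo_use(struct xe_sa_manager *sa_manager,
					    const void *payload, u32 size,
					    struct dma_fence *fence)
{
	struct drm_suballoc *sa_bo;

	sa_bo = __xe_sa_bo_new(sa_manager, size, GFP_KERNEL);
	if (IS_ERR(sa_bo))
		return PTR_ERR(sa_bo);

	/* Stage the payload through the CPU address, then flush to the BO. */
	memcpy(xe_sa_bo_cpu_addr(sa_bo), payload, size);
	xe_sa_bo_flush_write(sa_bo);

	/* The range is reclaimed for reuse once @fence signals. */
	xe_sa_bo_free(sa_bo, fence);

	return 0;
}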
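
/**
 * xe_sa_bo_flush_write() - Copy a suballocation's CPU shadow into the BO
 * @sa_bo: the &drm_suballoc to flush
 *
 * Suballocations from an I/O-memory backed manager are staged in a
 * CPU-side shadow buffer; this copies the shadow contents into the BO.
 * For system-memory BOs the write has already landed and this is a no-op.
 */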
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
{
	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	if (!sa_manager->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
			 xe_sa_bo_cpu_addr(sa_bo),
			 drm_suballoc_size(sa_bo));
}
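
/**
 * xe_sa_bo_free() - Release a suballocation
 * @sa_bo: the &drm_suballoc to free
 * @fence: fence signalling when the GPU is done with the suballocation
 *
 * The suballocated range is returned to the manager for reuse once
 * @fence signals.
 */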
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
		   struct dma_fence *fence)
{
	drm_suballoc_free(sa_bo, fence);
}