// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */
5 
6 #include <linux/cleanup.h>
7 #include <drm/drm_managed.h>
8 
9 #include "xe_assert.h"
10 #include "xe_bo.h"
11 #include "xe_gt_printk.h"
12 #include "xe_guc.h"
13 #include "xe_guc_buf.h"
14 #include "xe_sa.h"
15 
cache_to_guc(struct xe_guc_buf_cache * cache)16 static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
17 {
18 	return container_of(cache, struct xe_guc, buf);
19 }
20 
/* Resolve the &xe_gt that owns the GuC this buffer cache belongs to. */
static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
{
	struct xe_guc *guc = cache_to_guc(cache);

	return guc_to_gt(guc);
}
25 
26 /**
27  * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
28  * @cache: the &xe_guc_buf_cache to initialize
29  *
30  * The Buffer Cache allows to obtain a reusable buffer that can be used to pass
31  * indirect H2G data to GuC without a need to create a ad-hoc allocation.
32  *
33  * Return: 0 on success or a negative error code on failure.
34  */
xe_guc_buf_cache_init(struct xe_guc_buf_cache * cache)35 int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
36 {
37 	struct xe_gt *gt = cache_to_gt(cache);
38 	struct xe_sa_manager *sam;
39 
40 	/* XXX: currently it's useful only for the PF actions */
41 	if (!IS_SRIOV_PF(gt_to_xe(gt)))
42 		return 0;
43 
44 	sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
45 	if (IS_ERR(sam))
46 		return PTR_ERR(sam);
47 	cache->sam = sam;
48 
49 	xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
50 		  xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
51 		  __builtin_return_address(0));
52 	return 0;
53 }
54 
55 /**
56  * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
57  * @cache: the &xe_guc_buf_cache to query
58  *
59  * Return: a size of the largest reusable buffer (in dwords)
60  */
xe_guc_buf_cache_dwords(struct xe_guc_buf_cache * cache)61 u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
62 {
63 	return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
64 }
65 
66 /**
67  * xe_guc_buf_reserve() - Reserve a new sub-allocation.
68  * @cache: the &xe_guc_buf_cache where reserve sub-allocation
69  * @dwords: the requested size of the buffer in dwords
70  *
71  * Use xe_guc_buf_is_valid() to check if returned buffer reference is valid.
72  * Must use xe_guc_buf_release() to release a sub-allocation.
73  *
74  * Return: a &xe_guc_buf of new sub-allocation.
75  */
xe_guc_buf_reserve(struct xe_guc_buf_cache * cache,u32 dwords)76 struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
77 {
78 	struct drm_suballoc *sa;
79 
80 	if (cache->sam)
81 		sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC);
82 	else
83 		sa = ERR_PTR(-EOPNOTSUPP);
84 
85 	return (struct xe_guc_buf){ .sa = sa };
86 }
87 
88 /**
89  * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
90  * @cache: the &xe_guc_buf_cache where reserve sub-allocation
91  * @data: the data to flush the sub-allocation
92  * @size: the size of the data
93  *
94  * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory.
95  *
96  * Return: a &xe_guc_buf of new sub-allocation.
97  */
xe_guc_buf_from_data(struct xe_guc_buf_cache * cache,const void * data,size_t size)98 struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
99 				       const void *data, size_t size)
100 {
101 	struct drm_suballoc *sa;
102 
103 	sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
104 	if (!IS_ERR(sa))
105 		memcpy(xe_sa_bo_cpu_addr(sa), data, size);
106 
107 	return (struct xe_guc_buf){ .sa = sa };
108 }
109 
110 /**
111  * xe_guc_buf_release() - Release a sub-allocation.
112  * @buf: the &xe_guc_buf to release
113  *
114  * Releases a sub-allocation reserved by the xe_guc_buf_reserve().
115  */
xe_guc_buf_release(const struct xe_guc_buf buf)116 void xe_guc_buf_release(const struct xe_guc_buf buf)
117 {
118 	if (xe_guc_buf_is_valid(buf))
119 		xe_sa_bo_free(buf.sa, NULL);
120 }
121 
122 /**
123  * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
124  * @buf: the &xe_guc_buf to flush
125  *
126  * Return: a GPU address of the sub-allocation.
127  */
xe_guc_buf_flush(const struct xe_guc_buf buf)128 u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
129 {
130 	xe_sa_bo_flush_write(buf.sa);
131 	return xe_sa_bo_gpu_addr(buf.sa);
132 }
133 
134 /**
135  * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
136  * @buf: the &xe_guc_buf to query
137  *
138  * Return: a CPU pointer of the sub-allocation.
139  */
xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)140 void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
141 {
142 	return xe_sa_bo_cpu_addr(buf.sa);
143 }
144 
145 /**
146  * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
147  * @buf: the &xe_guc_buf to query
148  *
149  * Return: a GPU address of the sub-allocation.
150  */
xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)151 u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
152 {
153 	return xe_sa_bo_gpu_addr(buf.sa);
154 }
155 
156 /**
157  * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer.
158  * @cache: the &xe_guc_buf_cache with sub-allocations
159  * @ptr: the CPU pointer of the sub-allocation
160  * @size: the size of the data
161  *
162  * Return: a GPU address on success or 0 if the pointer was unrelated.
163  */
xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache * cache,const void * ptr,u32 size)164 u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
165 {
166 	ptrdiff_t offset = ptr - cache->sam->cpu_ptr;
167 
168 	if (offset < 0 || offset + size > cache->sam->base.size)
169 		return 0;
170 
171 	return cache->sam->gpu_addr + offset;
172 }
173 
174 #if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
175 #include "tests/xe_guc_buf_kunit.c"
176 #endif
177