/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <rm/gr.h>

#include <core/memory.h>
#include <subdev/gsp.h>
#include <subdev/mmu/vmm.h>
#include <engine/fifo/priv.h>
#include <engine/gr/priv.h>

#include <nvif/if900d.h>

#include <nvhw/drf.h>

#include "nvrm/gr.h"
#include "nvrm/vmm.h"

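/* Recover the r535_gr wrapper from an embedded nvkm_gr base pointer. */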
#define r535_gr(p) container_of((p), struct r535_gr, base)

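/* Tear down a channel's GR context: release each context buffer's VMA and
 * memory reference, then drop the reference on the channel's VMM.
 */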
static void *
r535_gr_chan_dtor(struct nvkm_object *object)
{
	struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
	struct r535_gr *gr = grc->gr;

	for (int i = 0; i < gr->ctxbuf_nr; i++) {
		nvkm_vmm_put(grc->vmm, &grc->vma[i]);
		nvkm_memory_unref(&grc->mem[i]);
	}

	nvkm_vmm_unref(&grc->vmm);
	return grc;
}

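/* Object functions for the per-channel GR context wrapper. */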
static const struct nvkm_object_func
r535_gr_chan = {
	.dtor = r535_gr_chan_dtor,
};

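/* Describe ("promote") a set of GR context buffers to GSP-RM via
 * NV2080_CTRL_CMD_GPU_PROMOTE_CTX.  For the golden channel every buffer is
 * allocated fresh; for normal channels the global buffers allocated at
 * oneinit time are reused and only per-channel buffers are allocated here.
 */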
int
r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
		    struct nvkm_memory **pmem, struct nvkm_vma **pvma,
		    struct nvkm_gsp_object *chan)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
				    NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
	if (WARN_ON(IS_ERR(ctrl)))
		return PTR_ERR(ctrl);

	ctrl->engineType = 1;
	ctrl->hChanClient = vmm->rm.client.object.handle;
	ctrl->hObject = chan->handle;

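	/* Build one promote entry per context buffer, allocating backing
	 * memory and a GPU virtual mapping where required.
	 */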
	for (int i = 0; i < gr->ctxbuf_nr; i++) {
		NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
			&ctrl->promoteEntry[ctrl->entryCount];
		const bool alloc = golden || !gr->ctxbuf[i].global;
		int ret;

		entry->bufferId = gr->ctxbuf[i].bufferId;
		entry->bInitialize = gr->ctxbuf[i].init && alloc;

		if (alloc) {
			ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
					      NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
					      gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
					      gr->ctxbuf[i].init, &pmem[i]);
			if (WARN_ON(ret))
				return ret;

			if (gr->ctxbuf[i].bufferId ==
					NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
				entry->bNonmapped = 1;
		} else {
			if (gr->ctxbuf[i].bufferId ==
				NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
				continue;

			pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
		}

		if (!entry->bNonmapped) {
			struct gf100_vmm_map_v0 args = {
				.priv = 1,
				.ro   = gr->ctxbuf[i].ro,
			};

			mutex_lock(&vmm->mutex.vmm);
			ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
						  nvkm_memory_size(pmem[i]), &pvma[i]);
			mutex_unlock(&vmm->mutex.vmm);
			if (ret)
				return ret;

			ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
			if (ret)
				return ret;

			entry->gpuVirtAddr = pvma[i]->addr;
		}

		if (entry->bInitialize) {
			entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
			entry->size = gr->ctxbuf[i].size;
			entry->physAttr = 4;
		}

		nvkm_debug(subdev,
			   "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
			   entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
			   entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);

		ctrl->entryCount++;
	}

	return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
}

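/* Allocate the SW state tracking a channel's GR context and promote its
 * context buffers to RM against the channel's RM object.
 */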
int
r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
		 struct nvkm_object **pobject)
{
	struct r535_gr *gr = r535_gr(base);
	struct r535_gr_chan *grc;
	int ret;

	if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
	grc->gr = gr;
	grc->vmm = nvkm_vmm_ref(chan->vmm);
	grc->chan = chan;
	*pobject = &grc->object;

	ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
	if (ret)
		return ret;

	return 0;
}

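/* Expose the GPC/TPC counts reported by GSP-RM: GPC count in the low byte,
 * TPC count shifted above it.
 */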
u64
r535_gr_units(struct nvkm_gr *gr)
{
	struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;

	return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
}

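/* Translate one RM engine context buffer description into an entry in
 * gr->ctxbuf[], recording the promote-time buffer ID, size, page size,
 * alignment and allocation policy.  Buffers not listed in map[] below are
 * ignored.
 */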
void
r535_gr_get_ctxbuf_info(struct r535_gr *gr, int i,
			struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO *info)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	static const struct {
		u32     id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
		u32     id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
		bool global;
		bool   init;
		bool     ro;
	} map[] = {
#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
		.id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
		.global = (G), .init = (I), .ro = (R) }
#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
		/*                                       global   init     ro */
		_A(           GRAPHICS,             MAIN, false,  true, false),
		_B(                                PATCH, false,  true, false),
		_A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB,  true, false, false),
		_B(                             PAGEPOOL,  true, false, false),
		_B(                         ATTRIBUTE_CB,  true, false, false),
		_B(                        RTV_CB_GLOBAL,  true, false, false),
		_B(                           FECS_EVENT,  true,  true, false),
		_B(                      PRIV_ACCESS_MAP,  true,  true,  true),
#undef _B
#undef _A
	};
	u32 size = info->size;
	u8 align, page;
	int id;

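	/* Only buffers listed in map[] are tracked; anything else is logged
	 * and skipped.
	 */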
	for (id = 0; id < ARRAY_SIZE(map); id++) {
		if (map[id].id0 == i)
			break;
	}

	nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
		   size, (id < ARRAY_SIZE(map)) ? "*" : "");
	if (id >= ARRAY_SIZE(map))
		return;

	if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
		size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */

	if      (size >= 1 << 21) page = 21;
	else if (size >= 1 << 16) page = 16;
	else			  page = 12;

	if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
		align = order_base_2(size);
	else
		align = page;

	if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
		return;

	gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
	gr->ctxbuf[gr->ctxbuf_nr].size     = size;
	gr->ctxbuf[gr->ctxbuf_nr].page     = page;
	gr->ctxbuf[gr->ctxbuf_nr].align    = align;
	gr->ctxbuf[gr->ctxbuf_nr].global   = map[id].global;
	gr->ctxbuf[gr->ctxbuf_nr].init     = map[id].init;
	gr->ctxbuf[gr->ctxbuf_nr].ro       = map[id].ro;
	gr->ctxbuf_nr++;

	if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
		if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
			return;

		gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
		gr->ctxbuf[gr->ctxbuf_nr].bufferId =
			NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
		gr->ctxbuf_nr++;
	}
}

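/* Query GSP-RM for the size/attributes of every GR engine context buffer
 * and record the ones NVKM needs to manage.
 */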
static int
r535_gr_get_ctxbufs_info(struct r535_gr *gr)
{
	NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_gsp *gsp = subdev->device->gsp;

	info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
				   sizeof(*info));
	if (WARN_ON(IS_ERR(info)))
		return PTR_ERR(info);

	for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
		r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
	return 0;
}

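/* One-time GR setup: create a temporary "golden" channel and VMM, fetch
 * context buffer info from GSP-RM, promote the golden context, and allocate
 * a 3D class object so RM initializes the golden context image.  The
 * temporary channel is then torn down; only the global context buffers and
 * the ctxbuf[] descriptions are kept.
 */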
int
r535_gr_oneinit(struct nvkm_gr *base)
{
	struct r535_gr *gr = container_of(base, typeof(*gr), base);
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gsp *gsp = device->gsp;
	struct nvkm_rm *rm = gsp->rm;
	struct {
		struct nvkm_memory *inst;
		struct nvkm_vmm *vmm;
		struct nvkm_gsp_object chan;
		struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
	} golden = {};
	struct nvkm_gsp_object threed;
	int ret;

	/* Allocate a channel to use for golden context init. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
	if (ret)
		goto done;

	ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
	if (ret)
		goto done;

	ret = r535_mmu_vaspace_new(golden.vmm, NVKM_RM_VASPACE, false);
	if (ret)
		goto done;

	ret = rm->api->fifo->chan.alloc(&golden.vmm->rm.device, NVKM_RM_CHAN(0),
					1, 0, true, rm->api->fifo->rsvd_chids,
					nvkm_memory_addr(golden.inst),
					nvkm_memory_addr(golden.inst) + 0x1000,
					nvkm_memory_addr(golden.inst) + 0x2000,
					golden.vmm, 0, 0x1000, &golden.chan);
	if (ret)
		goto done;

	/* Fetch context buffer info from RM and allocate each of them here to use
	 * during golden context init (or later as a global context buffer).
	 *
	 * Also build the information that'll be used to create channel contexts.
	 */
	ret = rm->api->gr->get_ctxbufs_info(gr);
	if (ret)
		goto done;

	/* Promote golden context to RM. */
	ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
	if (ret)
		goto done;

	/* Allocate 3D class on channel to trigger golden context init in RM. */
	ret = nvkm_gsp_rm_alloc(&golden.chan, NVKM_RM_THREED, rm->gpu->gr.class.threed, 0, &threed);
	if (ret)
		goto done;

	/* There's no need to keep the golden channel around, as RM caches the context. */
	nvkm_gsp_rm_free(&threed);
done:
	nvkm_gsp_rm_free(&golden.chan);
	for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
		nvkm_vmm_put(golden.vmm, &golden.vma[i]);
	nvkm_vmm_unref(&golden.vmm);
	nvkm_memory_unref(&golden.inst);
	return ret;
}

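/* Release the global context buffers, along with the engine function table
 * allocated when the GR engine was constructed.
 */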
void *
r535_gr_dtor(struct nvkm_gr *base)
{
	struct r535_gr *gr = r535_gr(base);

	while (gr->ctxbuf_nr)
		nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);

	kfree(gr->base.func);
	return gr;
}

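/* GR API entry points for the r535 GSP-RM backend. */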
const struct nvkm_rm_api_gr
r535_gr = {
	.get_ctxbufs_info = r535_gr_get_ctxbufs_info,
};
357