/* SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 */
#include <rm/gr.h>

#include <subdev/mmu.h>
#include <engine/fifo.h>
#include <engine/fifo/chid.h>
#include <engine/gr/priv.h>

#include "nvrm/gr.h"
#include "nvrm/engine.h"

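/* Read the TPC floorsweeping mask for a single GPC from GSP-RM. */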
int
r570_gr_tpc_mask(struct nvkm_gsp *gsp, int gpc, u32 *pmask)
{
	NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS *ctrl;
	int ret;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_GPU_GET_FERMI_TPC_INFO, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->gpcId = gpc;

	ret = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, &ctrl, sizeof(*ctrl));
	if (ret)
		return ret;

	*pmask = ctrl->tpcMask;

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	return 0;
}

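/* Read the GPC floorsweeping mask from GSP-RM. */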
int
r570_gr_gpc_mask(struct nvkm_gsp *gsp, u32 *pmask)
{
	NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_GPU_GET_FERMI_GPC_INFO, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	*pmask = ctrl->gpcMask;

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
	return 0;
}

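/*
 * Tell RM to set up (teardown == false) or tear down (teardown == true)
 * the scrubber-channel workaround for bug 4208224.
 */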
static int
r570_gr_scrubber_ctrl(struct r535_gr *gr, bool teardown)
{
	NV2080_CTRL_INTERNAL_GR_INIT_BUG4208224_WAR_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&gr->scrubber.vmm->rm.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_KGR_INIT_BUG4208224_WAR,
				    sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->bTeardown = teardown;

	return nvkm_gsp_rm_ctrl_wr(&gr->scrubber.vmm->rm.device.subdevice, ctrl);
}

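/*
 * Undo r570_gr_scrubber_init(): disable the workaround on RM, then free
 * the scrubber channel, its context buffers, VMM and instance memory.
 */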
static void
r570_gr_scrubber_fini(struct r535_gr *gr)
{
	/* Teardown scrubber channel on RM. */
	if (gr->scrubber.enabled) {
		WARN_ON(r570_gr_scrubber_ctrl(gr, true));
		gr->scrubber.enabled = false;
	}

	/* Free scrubber channel. */
	nvkm_gsp_rm_free(&gr->scrubber.threed);
	nvkm_gsp_rm_free(&gr->scrubber.chan);

	for (int i = 0; i < gr->ctxbuf_nr; i++) {
		nvkm_vmm_put(gr->scrubber.vmm, &gr->scrubber.ctxbuf.vma[i]);
		nvkm_memory_unref(&gr->scrubber.ctxbuf.mem[i]);
	}

	nvkm_vmm_unref(&gr->scrubber.vmm);
	nvkm_memory_unref(&gr->scrubber.inst);
}

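/*
 * Create a scrubber channel with a 3D object and have RM apply the
 * bug 4208224 workaround on it.  Only needed on TU10x chipsets.
 */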
static int
r570_gr_scrubber_init(struct r535_gr *gr)
{
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gsp *gsp = device->gsp;
	struct nvkm_rm *rm = gsp->rm;
	int ret;

	/* Scrubber channel only required on TU10x. */
	switch (device->chipset) {
	case 0x162: /* TU102 */
	case 0x164: /* TU104 */
	case 0x166: /* TU106 */
		break;
	default:
		return 0;
	}

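	/* Reserve a channel ID for the scrubber if we don't have one yet. */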
	if (gr->scrubber.chid < 0) {
		gr->scrubber.chid = nvkm_chid_get(device->fifo->chid, NULL);
		if (gr->scrubber.chid < 0)
			return gr->scrubber.chid;
	}

	/*
	 * Allocate backing memory for the scrubber channel: a single
	 * allocation holding the instance block (+0x0000), USERD (+0x1000)
	 * and the method buffer (+0x2000).
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      0x2000 + device->fifo->rm.mthdbuf_size, 0, true,
			      &gr->scrubber.inst);
	if (ret)
		goto done;

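	/* Create an address space for the scrubber and register it with RM. */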
	ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grScrubberVmm",
			   &gr->scrubber.vmm);
	if (ret)
		goto done;

	ret = r535_mmu_vaspace_new(gr->scrubber.vmm, KGRAPHICS_SCRUBBER_HANDLE_VAS, false);
	if (ret)
		goto done;

	ret = rm->api->fifo->chan.alloc(&gr->scrubber.vmm->rm.device, KGRAPHICS_SCRUBBER_HANDLE_CHANNEL,
					NV2080_ENGINE_TYPE_GR0, 0, false, gr->scrubber.chid,
					nvkm_memory_addr(gr->scrubber.inst),
					nvkm_memory_addr(gr->scrubber.inst) + 0x1000,
					nvkm_memory_addr(gr->scrubber.inst) + 0x2000,
					gr->scrubber.vmm, 0, 0x1000, &gr->scrubber.chan);
	if (ret)
		goto done;

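	/* Allocate, map and promote the channel's context buffers. */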
	ret = r535_gr_promote_ctx(gr, false, gr->scrubber.vmm, gr->scrubber.ctxbuf.mem,
				  gr->scrubber.ctxbuf.vma, &gr->scrubber.chan);
	if (ret)
		goto done;

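	/* Allocate a 3D class object on the scrubber channel. */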
	ret = nvkm_gsp_rm_alloc(&gr->scrubber.chan, KGRAPHICS_SCRUBBER_HANDLE_3DOBJ,
				rm->gpu->gr.class.threed, 0, &gr->scrubber.threed);
	if (ret)
		goto done;

	/* Initialise scrubber channel on RM. */
	ret = r570_gr_scrubber_ctrl(gr, false);
	if (ret)
		goto done;

	gr->scrubber.enabled = true;

done:
	if (ret)
		r570_gr_scrubber_fini(gr);

	return ret;
}

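/*
 * Query the per-engine GR context buffer descriptions from GSP-RM and
 * record them via r535_gr_get_ctxbuf_info().
 */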
static int
r570_gr_get_ctxbufs_info(struct r535_gr *gr)
{
	NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
	struct nvkm_subdev *subdev = &gr->base.engine.subdev;
	struct nvkm_gsp *gsp = subdev->device->gsp;

	info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
				   NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
				   sizeof(*info));
	if (WARN_ON(IS_ERR(info)))
		return PTR_ERR(info);

	for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++)
		r535_gr_get_ctxbuf_info(gr, i, &info->engineContextBuffersInfo[0].engine[i]);

	nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
	return 0;
}

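/* GR API entry points for the r570 GSP-RM firmware branch. */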
const struct nvkm_rm_api_gr
r570_gr = {
	.get_ctxbufs_info = r570_gr_get_ctxbufs_info,
	.scrubber.init = r570_gr_scrubber_init,
	.scrubber.fini = r570_gr_scrubber_fini,
};