/* SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 */
#include <rm/rm.h>
#include <rm/rpc.h>

#include <asm-generic/video.h>

#include "nvrm/gsp.h"
#include "nvrm/rpcfn.h"
#include "nvrm/msgfn.h"

#include <core/pci.h>
#include <subdev/pci/priv.h>

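/* Size of the WPR data preserved across suspend/resume: the span from the
 * end of the non-WPR heap up to the end of FRTS, as described by the
 * GspFwWprMeta layout.
 */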
static u32
r570_gsp_sr_data_size(struct nvkm_gsp *gsp)
{
	GspFwWprMeta *meta = gsp->wpr_meta.data;

	return (meta->frtsOffset + meta->frtsSize) -
	       (meta->nonWprHeapOffset + meta->nonWprHeapSize);
}

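/* Unless verbose GSP debugging is enabled, register empty notify handlers
 * so that POST_NOCAT_RECORD and LOCKDOWN_NOTICE events from GSP-RM are
 * quietly discarded.
 */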
static void
r570_gsp_drop_post_nocat_record(struct nvkm_gsp *gsp)
{
	if (gsp->subdev.debug < NV_DBG_DEBUG) {
		r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD, NULL, NULL);
		r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE, NULL, NULL);
	}
}

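/* Translate a GSP-RM MC engine index into the corresponding NVKM
 * subdev/engine type and instance.  Returns false for indices NVKM
 * does not handle.
 */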
static bool
r570_gsp_xlat_mc_engine_idx(u32 mc_engine_idx, enum nvkm_subdev_type *ptype, int *pinst)
{
	switch (mc_engine_idx) {
	case MC_ENGINE_IDX_GSP:
		*ptype = NVKM_SUBDEV_GSP;
		*pinst = 0;
		return true;
	case MC_ENGINE_IDX_DISP:
		*ptype = NVKM_ENGINE_DISP;
		*pinst = 0;
		return true;
	case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE19:
		*ptype = NVKM_ENGINE_CE;
		*pinst = mc_engine_idx - MC_ENGINE_IDX_CE0;
		return true;
	case MC_ENGINE_IDX_GR0:
		*ptype = NVKM_ENGINE_GR;
		*pinst = 0;
		return true;
	case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
		*ptype = NVKM_ENGINE_NVDEC;
		*pinst = mc_engine_idx - MC_ENGINE_IDX_NVDEC0;
		return true;
	case MC_ENGINE_IDX_NVENC ... MC_ENGINE_IDX_NVENC3:
		*ptype = NVKM_ENGINE_NVENC;
		*pinst = mc_engine_idx - MC_ENGINE_IDX_NVENC;
		return true;
	case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
		*ptype = NVKM_ENGINE_NVJPG;
		*pinst = mc_engine_idx - MC_ENGINE_IDX_NVJPEG0;
		return true;
	case MC_ENGINE_IDX_OFA0 ... MC_ENGINE_IDX_OFA1:
		*ptype = NVKM_ENGINE_OFA;
		*pinst = mc_engine_idx - MC_ENGINE_IDX_OFA0;
		return true;
	default:
		return false;
	}
}

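/* Read the static configuration reported by GSP-RM and cache the pieces
 * NVKM needs: internal client/device/subdevice handles, BAR1/BAR2 page
 * directory bases, FB region info, the WPR layout when it was chosen by
 * ACR, and the GPC/TPC counts used by GR.
 */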
static int
r570_gsp_get_static_info(struct nvkm_gsp *gsp)
{
	GspStaticConfigInfo *rpc;
	u32 gpc_mask;
	u32 tpc_mask;
	int ret;

	rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
	if (IS_ERR(rpc))
		return PTR_ERR(rpc);

	gsp->internal.client.object.client = &gsp->internal.client;
	gsp->internal.client.object.parent = NULL;
	gsp->internal.client.object.handle = rpc->hInternalClient;
	gsp->internal.client.gsp = gsp;
	INIT_LIST_HEAD(&gsp->internal.client.events);

	gsp->internal.device.object.client = &gsp->internal.client;
	gsp->internal.device.object.parent = &gsp->internal.client.object;
	gsp->internal.device.object.handle = rpc->hInternalDevice;

	gsp->internal.device.subdevice.client = &gsp->internal.client;
	gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
	gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;

	gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
	gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;

	r535_gsp_get_static_info_fb(gsp, &rpc->fbRegionInfoParams);

	if (gsp->rm->wpr->offset_set_by_acr) {
		GspFwWprMeta *meta = gsp->wpr_meta.data;

		meta->nonWprHeapOffset = rpc->fwWprLayoutOffset.nonWprHeapOffset;
		meta->frtsOffset = rpc->fwWprLayoutOffset.frtsOffset;
	}

	nvkm_gsp_rpc_done(gsp, rpc);

	ret = r570_gr_gpc_mask(gsp, &gpc_mask);
	if (ret)
		return ret;

	for (int gpc = 0; gpc < 32; gpc++) {
		if (gpc_mask & BIT(gpc)) {
			ret = r570_gr_tpc_mask(gsp, gpc, &tpc_mask);
			if (ret)
				return ret;

			gsp->gr.tpcs += hweight32(tpc_mask);
			gsp->gr.gpcs++;
		}
	}

	return 0;
}

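/* Collect the ACPI DOD/JT/CAPS method data passed to GSP-RM as part of the
 * system info.  Left marked invalid on non-ACPI/x86 configurations or when
 * the device has no ACPI handle.
 */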
static void
r570_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
	acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);

	if (!handle)
		return;

	acpi->bValid = 1;

	r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
	r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
	r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
#endif
}

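/* Send the GSP_SET_SYSTEM_INFO RPC describing the host to GSP-RM: BAR
 * addresses, PCI identity, config-space mirror location, maximum userspace
 * VA, ACPI method data and whether this is the primary display device.
 */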
static int
r570_gsp_set_system_info(struct nvkm_gsp *gsp)
{
	struct nvkm_device *device = gsp->subdev.device;
	struct pci_dev *pdev = container_of(device, struct nvkm_device_pci, device)->pdev;
	GspSystemInfo *info;

	if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
		return -ENOSYS;

	info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
	info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
	info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
	info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
	info->maxUserVa = TASK_SIZE;
	info->pciConfigMirrorBase = device->pci->func->cfg.addr;
	info->pciConfigMirrorSize = device->pci->func->cfg.size;
	info->PCIDeviceID = (pdev->device << 16) | pdev->vendor;
	info->PCISubDeviceID = (pdev->subsystem_device << 16) | pdev->subsystem_vendor;
	info->PCIRevisionID = pdev->revision;
	r570_gsp_acpi_info(gsp, &info->acpiMethodData);
	info->bIsPrimary = video_is_primary_device(device->dev);
	info->bPreserveVideoMemoryAllocations = false;

	return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
}

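/* Fill in the boot arguments passed to GSP-RM: the location and layout of
 * the shared command/status message queues, plus suspend/resume state when
 * resuming.
 */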
static void
r570_gsp_set_rmargs(struct nvkm_gsp *gsp, bool resume)
{
	GSP_ARGUMENTS_CACHED *args;

	args = gsp->rmargs.data;
	args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
	args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
	args->messageQueueInitArguments.cmdQueueOffset =
		(u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
	args->messageQueueInitArguments.statQueueOffset =
		(u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;

	if (!resume) {
		args->srInitArguments.oldLevel = 0;
		args->srInitArguments.flags = 0;
		args->srInitArguments.bInPMTransition = 0;
	} else {
		args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
		args->srInitArguments.flags = 0;
		args->srInitArguments.bInPMTransition = 1;
	}

	args->bDmemStack = 1;
}

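/* r570-specific implementations of the nvkm_rm_api_gsp hooks. */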
const struct nvkm_rm_api_gsp
r570_gsp = {
	.set_rmargs = r570_gsp_set_rmargs,
	.set_system_info = r570_gsp_set_system_info,
	.get_static_info = r570_gsp_get_static_info,
	.xlat_mc_engine_idx = r570_gsp_xlat_mc_engine_idx,
	.drop_post_nocat_record = r570_gsp_drop_post_nocat_record,
	.sr_data_size = r570_gsp_sr_data_size,
};