/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <subdev/instmem/priv.h>
#include <subdev/gsp.h>

#include <nvhw/drf.h>

#include "nvrm/fbsr.h"
#include "nvrm/rpcfn.h"

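/* A single VRAM region queued for GSP-RM to save across suspend. */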
struct fbsr_item {
	const char *type;
	u64 addr;
	u64 size;

	struct list_head head;
};

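/*
 * State used to build the FBSR request: the list of regions to save, their
 * combined size, a temporary GSP client/device, the next RM object handle
 * to use (hmemory), and the running offset into the sysmem backup buffer.
 */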
struct fbsr {
	struct list_head items;

	u64 size;
	int regions;

	struct nvkm_gsp_client client;
	struct nvkm_gsp_device device;

	u64 hmemory;
	u64 sys_offset;
};

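/*
 * Allocate an RM memory-list object describing either a contiguous VRAM
 * range (any aperture other than NVKM_MEM_TARGET_HOST) or a possibly
 * discontiguous sysmem buffer described by an sg_table, with one PTE
 * per GSP page.
 */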
int
r535_fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
		  u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
{
	struct nvkm_gsp_client *client = device->object.client;
	struct nvkm_gsp *gsp = client->gsp;
	const u32 pages = size / GSP_PAGE_SIZE;
	rpc_alloc_memory_v13_01 *rpc;
	int ret;

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
			       sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
	if (IS_ERR(rpc))
		return PTR_ERR(rpc);

	rpc->hClient = client->object.handle;
	rpc->hDevice = device->object.handle;
	rpc->hMemory = handle;
	if (aper == NVKM_MEM_TARGET_HOST) {
		rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
		rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
			     NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
			     NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
	} else {
		rpc->hClass = NV01_MEMORY_LIST_FBMEM;
		rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
			     NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
			     NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
		rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
	}
	rpc->pteAdjust = 0;
	rpc->length = size;
	rpc->pageCount = pages;
	rpc->pteDesc.idr = 0;
	rpc->pteDesc.reserved1 = 0;
	rpc->pteDesc.length = pages;

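	/* Fill in one PTE (a 4KiB page index) per GSP page of the backing store. */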
	if (sgt) {
		struct scatterlist *sgl;
		int pte = 0, idx;

		for_each_sgtable_dma_sg(sgt, sgl, idx) {
			for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
				rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
		}
	} else {
		for (int i = 0; i < pages; i++)
			rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
	}

	ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);
	if (ret)
		return ret;

	object->client = device->object.client;
	object->parent = &device->object;
	object->handle = handle;
	return 0;
}

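/*
 * Register a single VRAM region with RM, telling it where in the sysmem
 * backup buffer (sys_offset) the region's contents should be saved.
 */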
static int
fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
{
	NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
	struct nvkm_gsp *gsp = fbsr->client.gsp;
	struct nvkm_gsp_object memlist;
	int ret;

	ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
				item->addr, item->size, NULL, &memlist);
	if (ret)
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
				    sizeof(*ctrl));
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto done;
	}

	ctrl->fbsrType = FBSR_TYPE_DMA;
	ctrl->hClient = fbsr->client.object.handle;
	ctrl->hVidMem = fbsr->hmemory++;
	ctrl->vidOffset = 0;
	ctrl->sysOffset = fbsr->sys_offset;
	ctrl->size = item->size;

	ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
done:
	nvkm_gsp_rm_free(&memlist);
	if (ret)
		return ret;

	fbsr->sys_offset += item->size;
	return 0;
}

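/*
 * Wrap the sysmem backup buffer in a memory-list object and tell RM to
 * initialise FBSR with it; items_size marks where RM's own FB allocations
 * are saved within the buffer, after the regions listed by the driver.
 */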
static int
fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
{
	NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
	struct nvkm_gsp *gsp = fbsr->client.gsp;
	struct nvkm_gsp_object memlist;
	int ret;

	ret = r535_fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
				0, fbsr->size, sgt, &memlist);
	if (ret)
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
	if (IS_ERR(ctrl)) {
		ret = PTR_ERR(ctrl);
		goto done;
	}

	ctrl->fbsrType = FBSR_TYPE_DMA;
	ctrl->numRegions = fbsr->regions;
	ctrl->hClient = fbsr->client.object.handle;
	ctrl->hSysMem = fbsr->hmemory++;
	ctrl->gspFbAllocsSysOffset = items_size;

	ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
done:
	nvkm_gsp_rm_free(&memlist);
	return ret;
}

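/* Queue a VRAM region to be saved; returns false on allocation failure. */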
static bool
fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
{
	struct fbsr_item *item;

	if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
		return false;

	item->type = type;
	item->addr = addr;
	item->size = size;
	list_add_tail(&item->head, &fbsr->items);
	return true;
}

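/* Queue the VRAM backing an instance-memory object to be saved. */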
static bool
fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
{
	return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}

void
r535_fbsr_resume(struct nvkm_gsp *gsp)
{
	/* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
	nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.fbsr);
}

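/*
 * Allocate a sysmem buffer large enough to hold every region that must be
 * preserved, then ask RM to copy those regions into it before suspend.
 */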
static int
r535_fbsr_suspend(struct nvkm_gsp *gsp)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_instobj *iobj;
	struct fbsr fbsr = {};
	struct fbsr_item *item, *temp;
	u64 items_size;
	int ret;

	INIT_LIST_HEAD(&fbsr.items);
	fbsr.hmemory = 0xcaf00003;

	/* Create a list of all regions we need RM to save during suspend. */
	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->preserve) {
			if (!fbsr_inst(&fbsr, "inst", &iobj->memory)) {
				ret = -ENOMEM;
				goto done;
			}
		}
	}

	list_for_each_entry(iobj, &imem->boot, head) {
		if (!fbsr_inst(&fbsr, "boot", &iobj->memory)) {
			ret = -ENOMEM;
			goto done;
		}
	}

	if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size)) {
		ret = -ENOMEM;
		goto done;
	}

	/* Determine memory requirements. */
	list_for_each_entry(item, &fbsr.items, head) {
		nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
			   item->addr, item->size, item->type);
		fbsr.size += item->size;
		fbsr.regions++;
	}

	items_size = fbsr.size;
	nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);

	fbsr.size += gsp->fb.rsvd_size;
	fbsr.size += gsp->fb.bios.vga_workspace.size;
	nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);

	ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &gsp->sr.fbsr);
	if (ret)
		goto done;

	/* Tell RM about the sysmem which will hold VRAM contents across suspend. */
	ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
	if (ret)
		goto done_sgt;

	ret = fbsr_init(&fbsr, &gsp->sr.fbsr, items_size);
	if (WARN_ON(ret))
		goto done_sgt;

	/* Send VRAM regions that need saving. */
	list_for_each_entry(item, &fbsr.items, head) {
		ret = fbsr_send(&fbsr, item);
		if (WARN_ON(ret))
			goto done_sgt;
	}

	/* Cleanup everything except the sysmem backup, which will be removed after resume. */
done_sgt:
	if (ret) /* ... unless we failed already. */
		nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
done:
	list_for_each_entry_safe(item, temp, &fbsr.items, head) {
		list_del(&item->head);
		kfree(item);
	}

	nvkm_gsp_device_dtor(&fbsr.device);
	nvkm_gsp_client_dtor(&fbsr.client);
	return ret;
}

const struct nvkm_rm_api_fbsr
r535_fbsr = {
	.suspend = r535_fbsr_suspend,
	.resume = r535_fbsr_resume,
};

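/* Free the instmem function table allocated by r535_instmem_new(). */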
static void *
r535_instmem_dtor(struct nvkm_instmem *imem)
{
	kfree(imem->func);
	return imem;
}

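/*
 * Wrap the hardware instmem implementation for use with GSP-RM, reusing
 * its paths but disabling the zeroing of fresh allocations (rm->zero).
 */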
int
r535_instmem_new(const struct nvkm_instmem_func *hw,
		 struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		 struct nvkm_instmem **pinstmem)
{
	struct nvkm_instmem_func *rm;
	int ret;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_instmem_dtor;
	rm->fini = hw->fini;
	rm->memory_new = hw->memory_new;
	rm->memory_wrap = hw->memory_wrap;
	rm->zero = false;
	rm->set_bar0_window_addr = hw->set_bar0_window_addr;

	ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
	if (ret)
		kfree(rm);

	return ret;
}