Lines Matching +full:rpc +full:- +full:if
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
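
/*
 * The header above is generated from NVIDIA's 535.113.01 GSP-RM sources;
 * it is presumably where the rpc_alloc_memory_v13_01 structure used below
 * is declared.
 */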

/* in fbsr_memlist() */
struct nvkm_gsp_client *client = device->object.client;
struct nvkm_gsp *gsp = client->gsp;
const u32 pages = size / GSP_PAGE_SIZE;
rpc_alloc_memory_v13_01 *rpc;
int ret;

rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
                       sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
if (IS_ERR(rpc))
        return PTR_ERR(rpc);
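
/*
 * Note how the RPC message is sized: the fixed header plus one pte_pde
 * entry per page, so the page list rides along in the same ALLOC_MEMORY
 * call.
 */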
rpc->hClient = client->object.handle;
rpc->hDevice = device->object.handle;
rpc->hMemory = handle;
if (aper == NVKM_MEM_TARGET_HOST) {
        rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
        rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
                     /* ... remaining NVDEF() flag bits elided by the match ... */;
} else {
        rpc->hClass = NV01_MEMORY_LIST_FBMEM;
        rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
                     /* ... remaining NVDEF() flag bits elided by the match ... */;
        rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
}
rpc->pteAdjust = 0;
rpc->length = size;
rpc->pageCount = pages;
rpc->pteDesc.idr = 0;
rpc->pteDesc.reserved1 = 0;
rpc->pteDesc.length = pages;
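
/*
 * Sanity check on the sizing math: the >>12 shifts below imply 4 KiB GSP
 * pages, so e.g. a 1 MiB region gives pages = 256 and 256 pte_pde entries
 * in the descriptor.
 */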
if (sgt) {
        struct scatterlist *sgl;
        int pte = 0, idx;

        /* One PTE per GSP page of each DMA segment. */
        for_each_sgtable_dma_sg(sgt, sgl, idx) {
                for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
                        rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
        }
} else {
        for (int i = 0; i < pages; i++)
                rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
}
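
/*
 * Either way the result is the same: pte_pde[] ends up as a flat array of
 * 4 KiB page-frame numbers, built from DMA segments for host memory or
 * from a single contiguous physical range for VRAM.
 */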
ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
if (ret)
        return ret;

object->client = device->object.client;
object->parent = &device->object;
object->handle = handle;
return 0;
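
/*
 * The returned nvkm_gsp_object records nothing beyond client, parent and
 * handle; presumably that is all the generic object-free path needs to
 * release the memlist on the RM side later.
 */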

/* in fbsr_send() */
struct nvkm_gsp *gsp = fbsr->client.gsp;
struct nvkm_gsp_object memlist;
int ret;

ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
                   item->addr, item->size, NULL, &memlist);
if (ret)
        return ret;

ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
                            /* ... control command and size elided by the match ... */);
if (IS_ERR(ctrl)) {
        /* ... */
}
ctrl->fbsrType = FBSR_TYPE_DMA;
ctrl->hClient = fbsr->client.object.handle;
ctrl->hVidMem = fbsr->hmemory++;
ctrl->vidOffset = 0;
ctrl->sysOffset = fbsr->sys_offset;
ctrl->size = item->size;

ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
/* ... */
if (ret)
        return ret;

fbsr->sys_offset += item->size;
return 0;
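
/*
 * Each preserved VRAM region thus gets its own memlist object
 * (fbsr->hmemory is a running handle counter) plus one control call
 * telling GSP-RM where in the host-side buffer that region lands;
 * sys_offset accumulates so regions pack back to back.
 */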

/* in fbsr_init() */
struct nvkm_gsp *gsp = fbsr->client.gsp;
struct nvkm_gsp_object memlist;
int ret;

ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
                   0, fbsr->size, sgt, &memlist);
if (ret)
        return ret;

ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
                            /* ... control command and size elided by the match ... */);
if (IS_ERR(ctrl))
        return PTR_ERR(ctrl);
ctrl->fbsrType = FBSR_TYPE_DMA;
ctrl->numRegions = fbsr->regions;
ctrl->hClient = fbsr->client.object.handle;
ctrl->hSysMem = fbsr->hmemory++;
ctrl->gspFbAllocsSysOffset = items_size;

ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
if (ret)
        return ret;
/* ... */
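
/*
 * Where fbsr_send() describes one VRAM region, fbsr_init() registers the
 * sysmem destination: a single host-memory memlist covering the whole
 * scatterlist, the region count, and the offset at which GSP-RM's own FB
 * allocations will be staged after the driver's items.
 */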

/* in fbsr_vram() */
if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
        return false;

item->type = type;
item->addr = addr;
item->size = size;
list_add_tail(&item->head, &fbsr->items);
return true;
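
/*
 * fbsr_inst(), which r535_instmem_suspend() calls below, does not match
 * the search terms and so is absent from this listing. A minimal sketch of
 * what it plausibly looks like, assuming it merely forwards an instance
 * object's backing address and size to fbsr_vram():
 */
static bool
fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
{
        return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
}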

/* in r535_instmem_resume() */
if (imem->rm.fbsr_valid) {
        nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
        imem->rm.fbsr_valid = false;
}
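
/*
 * On resume the VRAM contents have presumably been restored by GSP-RM, so
 * the host-side scatterlist backup is dropped and the valid flag cleared.
 */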

/* in r535_instmem_suspend() */
struct nvkm_subdev *subdev = &imem->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_gsp *gsp = device->gsp;
/* ... */

/* Collect the instance objects whose contents must survive suspend. */
list_for_each_entry(iobj, &imem->list, head) {
        if (iobj->preserve) {
                if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
                        return -ENOMEM;
        }
}

list_for_each_entry(iobj, &imem->boot, head) {
        if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
                return -ENOMEM;
}
if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
        return -ENOMEM;

/* Total up how much host memory the backup needs. */
list_for_each_entry(item, &fbsr.items, head) {
        nvkm_debug(subdev, /* ... format elided by the match ... */
                   item->addr, item->size, item->type);
        fbsr.size += item->size;
}
/* ... */

fbsr.size += gsp->fb.rsvd_size;
fbsr.size += gsp->fb.bios.vga_workspace.size;
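
/*
 * The host buffer therefore has to hold every preserved region plus
 * GSP-RM's reserved framebuffer area and the VBIOS VGA workspace; that
 * total drives the scatterlist allocation below.
 */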
ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
if (ret)
        /* ... */;

/* ... */
if (ret)
        /* ... */;

ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
if (WARN_ON(ret))
        /* ... */;

list_for_each_entry(item, &fbsr.items, head) {
        ret = fbsr_send(&fbsr, item);
        if (WARN_ON(ret))
                /* ... */;
}

imem->rm.fbsr_valid = true;

/* ... */
if (ret) /* ... unless we failed already. */
        nvkm_gsp_sg_free(device, &imem->rm.fbsr);

list_for_each_entry_safe(item, temp, &fbsr.items, head) {
        list_del(&item->head);
        kfree(item);
}
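
/*
 * The fbsr_item list is purely driver-side bookkeeping, so it is freed on
 * both the success and error paths; the sg backup itself stays allocated
 * until resume unless the suspend sequence failed above.
 */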

/* in r535_instmem_dtor() */
kfree(imem->func);

/* in r535_instmem_new() */
if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
        return -ENOMEM;

rm->dtor = r535_instmem_dtor;
rm->fini = hw->fini;
rm->suspend = r535_instmem_suspend;
rm->resume = r535_instmem_resume;
rm->memory_new = hw->memory_new;
rm->memory_wrap = hw->memory_wrap;
rm->zero = false;

/* ... */
if (ret)
        kfree(rm);

return ret;
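
/*
 * r535_instmem_new() clones the hardware instmem function table,
 * overriding only dtor/suspend/resume, and r535_instmem_dtor()'s
 * kfree(imem->func) above releases that clone again; the same
 * wrap-the-hw-funcs pattern appears in nouveau's other r535 subdev
 * constructors.
 */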