/* SPDX-License-Identifier: MIT
 *
 * Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
 */
#include <subdev/instmem/priv.h>
#include <subdev/bar.h>
#include <subdev/gsp.h>
#include <subdev/mmu/vmm.h>

#include "nvrm/fbsr.h"
#include "nvrm/fifo.h"

static int
r570_fbsr_suspend_channels(struct nvkm_gsp *gsp, bool suspend)
{
	NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_FIFO_TOGGLE_ACTIVE_CHANNEL_SCHEDULING,
				    sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->bDisableActiveChannels = suspend;

	return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
}

static void
r570_fbsr_resume(struct nvkm_gsp *gsp)
{
	struct nvkm_device *device = gsp->subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_instobj *iobj;
	struct nvkm_vmm *vmm;

	/* Restore BAR2 page tables via BAR0 window, and re-enable BAR2. */
	list_for_each_entry(iobj, &imem->boot, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	device->bar->bar2 = true;

	vmm = nvkm_bar_bar2_vmm(device);
	vmm->func->flush(vmm, 0);

	/* Restore remaining BAR2 allocations (including BAR1 page tables) via BAR2. */
	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->suspend)
			nvkm_instobj_load(iobj);
	}

	vmm = nvkm_bar_bar1_vmm(device);
	vmm->func->flush(vmm, 0);

	/* Resume channel scheduling. */
	r570_fbsr_suspend_channels(device->gsp, false);

	/* Finish cleaning up. */
	r535_fbsr_resume(gsp);
}

static int
r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
{
	NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
	struct nvkm_gsp_object memlist;
	int ret;

	ret = r535_fbsr_memlist(&gsp->internal.device, 0xcaf00003, NVKM_MEM_TARGET_HOST,
				0, size, sgt, &memlist);
	if (ret)
		return ret;

	ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
				    NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->hClient = gsp->internal.client.object.handle;
	ctrl->hSysMem = memlist.handle;
	ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
	ctrl->bEnteringGcoffState = 1;

	ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
	if (ret)
		return ret;

	nvkm_gsp_rm_free(&memlist);
	return 0;
}

static int
r570_fbsr_suspend(struct nvkm_gsp *gsp)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_instobj *iobj;
	u64 size;
	int ret;

	/* Stop channel scheduling. */
	r570_fbsr_suspend_channels(gsp, true);

	/* Save BAR2 allocations to system memory. */
	list_for_each_entry(iobj, &imem->list, head) {
		if (iobj->preserve) {
			ret = nvkm_instobj_save(iobj);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(iobj, &imem->boot, head) {
		ret = nvkm_instobj_save(iobj);
		if (ret)
			return ret;
	}

	/* Disable BAR2 access. */
	device->bar->bar2 = false;
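
	/*
	 * Note on the sizing below: the suspend buffer has to hold every
	 * VRAM region RM wants preserved, i.e. the GSP-RM heap, the
	 * reserved memory region and the VBIOS VGA workspace, which are
	 * exactly the three gsp->fb fields summed next.  Each term is the
	 * full size of its region, so the buffer cannot undershoot
	 * whatever RM has actually placed there.
	 */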
	/* Allocate system memory to hold RM's VRAM allocations across suspend. */
	size = gsp->fb.heap.size;
	size += gsp->fb.rsvd_size;
	size += gsp->fb.bios.vga_workspace.size;
	nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", size);

	ret = nvkm_gsp_sg(device, size, &gsp->sr.fbsr);
	if (ret)
		return ret;

	/* Initialise FBSR on RM. */
	ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
	if (ret) {
		nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
		return ret;
	}

	return 0;
}

const struct nvkm_rm_api_fbsr
r570_fbsr = {
	.suspend = r570_fbsr_suspend,
	.resume = r570_fbsr_resume,
};
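
/*
 * A minimal sketch of how these hooks are expected to be reached,
 * assuming the usual nvkm_rm_api wiring (the actual r570 API table
 * lives elsewhere in the tree and may differ):
 *
 *	static const struct nvkm_rm_api
 *	r570_api = {
 *		...
 *		.fbsr = &r570_fbsr,
 *	};
 *
 * The core GSP suspend/resume path would then call something along
 * the lines of rm->api->fbsr->suspend() before powering down and
 * rm->api->fbsr->resume() on the way back up.
 */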