/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <subdev/bar/gf100.h>

#include <core/mm.h>
#include <subdev/fb.h>
#include <subdev/gsp.h>
#include <subdev/instmem.h>
#include <subdev/mmu/vmm.h>

#include "nvrm/bar.h"
#include "nvrm/rpcfn.h"

static void
r535_bar_flush(struct nvkm_bar *bar)
{
	/* Use NV_PFLUSH in the resume path - needed on R570 to flush writes
	 * before BAR2 page tables have been restored.
	 */
	if (unlikely(!bar->bar2)) {
		g84_bar_flush(bar);
		return;
	}

	ioread32_native(bar->flushBAR2);
}

static void
r535_bar_bar2_wait(struct nvkm_bar *base)
{
	/* Nothing to do - BAR2 is managed by GSP-RM. */
}

/* Ask GSP-RM to mirror the given BAR2 page directory entry. */
static int
r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u8 page_shift, u64 pdbe)
{
	rpc_update_bar_pde_v15_00 *rpc;

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
	if (WARN_ON(IS_ERR_OR_NULL(rpc)))
		return -EIO;

	rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
	rpc->info.entryValue = pdbe;
	rpc->info.entryLevelShift = page_shift;

	return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);
}

static void
r535_bar_bar2_fini(struct nvkm_bar *bar)
{
	struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
	struct nvkm_gsp *gsp = bar->subdev.device->gsp;

	/* Redirect flushes to the physical-mode mapping, and zero the PDE
	 * on the GSP-RM side before the VMM goes away.
	 */
	bar->flushBAR2 = bar->flushBAR2PhysMode;
	nvkm_done(bar->flushFBZero);

	WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, 0));
}

static void
r535_bar_bar2_init(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
	struct nvkm_gsp *gsp = device->gsp;
	struct nvkm_memory *pdb = vmm->pd->pt[0]->memory;
	u32 pdb_offset = vmm->pd->pt[0]->base;
	u32 pdbe_lo, pdbe_hi;
	u64 pdbe;

	/* Read the BAR2 VMM's top-level page directory entry, and hand it
	 * to GSP-RM.
	 */
	nvkm_kmap(pdb);
	pdbe_lo = nvkm_ro32(pdb, pdb_offset + 0);
	pdbe_hi = nvkm_ro32(pdb, pdb_offset + 4);
	pdbe = ((u64)pdbe_hi << 32) | pdbe_lo;
	nvkm_done(pdb);

	WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->func->page[0].shift, pdbe));
	vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;

	/* Map a page of VRAM at offset zero - reads from it are used to
	 * flush BAR2 writes.
	 */
	if (!bar->flushFBZero) {
		struct nvkm_memory *fbZero;
		int ret;

		ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
		if (ret == 0) {
			ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
			nvkm_memory_unref(&fbZero);
		}
		WARN_ON(ret);
	}

	bar->bar2 = true;
	bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
	WARN_ON(!bar->flushBAR2);
}

static void
r535_bar_bar1_wait(struct nvkm_bar *base)
{
	/* Nothing to do - BAR1 is managed by GSP-RM. */
}

static void
r535_bar_bar1_fini(struct nvkm_bar *base)
{
}

static void
r535_bar_bar1_init(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	struct nvkm_gsp *gsp = device->gsp;
	struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
	struct nvkm_memory *pd3;
	int ret;

	/* Replace BAR1's page directory with the one allocated by GSP-RM. */
	ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
	if (WARN_ON(ret))
		return;

	nvkm_memory_unref(&vmm->pd->pt[0]->memory);

	ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
	nvkm_memory_unref(&pd3);
	if (WARN_ON(ret))
		return;

	vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
}

static void *
r535_bar_dtor(struct nvkm_bar *bar)
{
	void *data = gf100_bar_dtor(bar);

	nvkm_memory_unref(&bar->flushFBZero);

	if (bar->flushBAR2PhysMode)
		iounmap(bar->flushBAR2PhysMode);

	kfree(bar->func);
	return data;
}

int
r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
{
	struct nvkm_bar_func *rm;
	struct nvkm_bar *bar;
	int ret;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	/* Wrap the hardware implementation's func table with the RM-aware
	 * hooks above.
	 */
	rm->dtor = r535_bar_dtor;
	rm->oneinit = hw->oneinit;
	rm->bar1.init = r535_bar_bar1_init;
	rm->bar1.fini = r535_bar_bar1_fini;
	rm->bar1.wait = r535_bar_bar1_wait;
	rm->bar1.vmm = hw->bar1.vmm;
	rm->bar2.init = r535_bar_bar2_init;
	rm->bar2.fini = r535_bar_bar2_fini;
	rm->bar2.wait = r535_bar_bar2_wait;
	rm->bar2.vmm = hw->bar2.vmm;
	rm->flush = r535_bar_flush;

	ret = gf100_bar_new_(rm, device, type, inst, &bar);
	if (ret) {
		kfree(rm);
		return ret;
	}
	*pbar = bar;

	bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
	if (!bar->flushBAR2PhysMode)
		return -ENOMEM;

	bar->flushBAR2 = bar->flushBAR2PhysMode;

	gf100_bar(*pbar)->bar2_halve = true;
	return 0;
}
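
/* Usage sketch (not part of this file): chipset code would be expected to
 * select this wrapper when running on GSP-RM, and fall back to driving the
 * BARs directly otherwise.  A minimal, hypothetical constructor following
 * that pattern - the "tu102_bar" func table and the gf100_bar_new_()
 * fallback are assumptions, not confirmed by this file:
 *
 *	int
 *	tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type,
 *		      int inst, struct nvkm_bar **pbar)
 *	{
 *		if (nvkm_gsp_rm(device->gsp))
 *			return r535_bar_new_(&tu102_bar, device, type, inst, pbar);
 *
 *		return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
 *	}
 */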