Lines Matching full:vmm

22 #include "vmm.h"
32 gf100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, in gf100_vmm_pgt_pte() argument
44 VMM_WO064(pt, vmm, ptei++ * 8, data); in gf100_vmm_pgt_pte()
51 VMM_WO064(pt, vmm, ptei++ * 8, data); in gf100_vmm_pgt_pte()
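These matches apparently come from nouveau's GF100 virtual memory manager (the gf100_vmm_* family, nvkm/subdev/mmu/vmmgf100.c). The two hits at lines 44 and 51 are the heart of gf100_vmm_pgt_pte(): page table entries are 64 bits wide, so entry ptei sits at byte offset ptei * 8, and the writer bumps ptei as it fills consecutive entries. Below is a minimal stand-alone sketch of that pattern, with the page table modelled as a plain u64 array instead of the real VMM_WO064() accessor; how `data` is built and how it advances between entries is not shown in the listing, so the `step` parameter is an assumption.

    #include <stdint.h>

    /* Simplified model of the inner loop at lines 44/51: the page table is a
     * flat array of 64-bit entries, so byte offset "ptei * 8" is simply index
     * ptei.  "step" stands in for however the real code advances the entry
     * value between consecutive pages (an assumption, not shown here). */
    static void pgt_write_ptes(uint64_t *pt, uint32_t ptei, uint32_t ptes,
                               uint64_t data, uint64_t step)
    {
        while (ptes--) {
            pt[ptei++] = data;   /* VMM_WO064(pt, vmm, ptei++ * 8, data) analogue */
            data += step;
        }
    }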
58 gf100_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, in gf100_vmm_pgt_sgl() argument
61 VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte); in gf100_vmm_pgt_sgl()
65 gf100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, in gf100_vmm_pgt_dma() argument
69 VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes); in gf100_vmm_pgt_dma()
73 VMM_WO064(pt, vmm, ptei++ * 8, data); in gf100_vmm_pgt_dma()
80 VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte); in gf100_vmm_pgt_dma()
84 gf100_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, in gf100_vmm_pgt_mem() argument
87 VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte); in gf100_vmm_pgt_mem()
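Lines 58-87 show three entry points (gf100_vmm_pgt_sgl, _dma, _mem) that all funnel into the same per-PTE writer through the VMM_MAP_ITER_SGL/DMA/MEM macros; gf100_vmm_pgt_dma additionally has a visible fast path that walks a DMA address array directly (the "DMAA" VMM_SPAM at line 69 and the write at line 73). A hedged sketch of that fast path follows: one DMA address per page, one 64-bit PTE per address. The (addr >> 8) | type packing is an assumption about the PTE layout, not something the listing shows, and the real code maps the table with nvkm_kmap() rather than touching a bare array.

    #include <stdint.h>

    /* Sketch of the small-page DMA fast path around lines 69-73: fill "ptes"
     * consecutive entries from an array of per-page DMA addresses.  The
     * (addr >> 8) | type_bits packing below is an assumed layout. */
    static void pgt_fill_from_dma(uint64_t *pt, uint32_t ptei, uint32_t ptes,
                                  const uint64_t *dma_addrs, uint64_t type_bits)
    {
        while (ptes--)
            pt[ptei++] = (*dma_addrs++ >> 8) | type_bits;
    }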
91 gf100_vmm_pgt_unmap(struct nvkm_vmm *vmm, in gf100_vmm_pgt_unmap() argument
94 VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes); in gf100_vmm_pgt_unmap()
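The counterpart at line 94 tears mappings down: gf100_vmm_pgt_unmap() fills a run of entries with 0ULL through VMM_FO064(). The same array model, in reverse:

    #include <stdint.h>

    /* Analogue of VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes): overwrite "ptes"
     * consecutive 64-bit entries with a fill value; an all-zero entry stands
     * for "unmapped" in this simplified model. */
    static void pgt_clear_ptes(uint64_t *pt, uint32_t ptei, uint32_t ptes)
    {
        while (ptes--)
            pt[ptei++] = 0ull;
    }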
106 gf100_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei) in gf100_vmm_pgd_pde() argument
142 VMM_WO064(pd, vmm, pdei * 8, data); in gf100_vmm_pgd_pde()
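gf100_vmm_pgd_pde() (lines 106-142) does the same kind of write one level up: it rebuilds a page directory entry and stores it at byte offset pdei * 8 in the PD, so both levels index 8-byte entries by pdei/ptei. The sketch below shows how a virtual address could split into those two indices; the shift and table-size values are purely illustrative and are not GF100's real page-table geometry.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative geometry, NOT GF100's real one: 4 KiB pages and an
     * arbitrary number of PTEs per page table, just to show how pdei/ptei
     * become byte offsets (index * 8) into tables of 8-byte entries. */
    #define PAGE_SHIFT_   12
    #define PTES_PER_PT_  1024          /* assumption for the sketch */

    int main(void)
    {
        uint64_t va = 0x12345678ull;
        uint32_t ptei = (uint32_t)((va >> PAGE_SHIFT_) % PTES_PER_PT_);
        uint32_t pdei = (uint32_t)((va >> PAGE_SHIFT_) / PTES_PER_PT_);

        printf("va 0x%llx -> pdei %u (PD byte offset %u), ptei %u (PT byte offset %u)\n",
               (unsigned long long)va, (unsigned)pdei, (unsigned)(pdei * 8),
               (unsigned)ptei, (unsigned)(ptei * 8));
        return 0;
    }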
181 gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr) in gf100_vmm_invalidate_pdb() argument
183 struct nvkm_device *device = vmm->mmu->subdev.device; in gf100_vmm_invalidate_pdb()
188 gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type) in gf100_vmm_invalidate() argument
190 struct nvkm_device *device = vmm->mmu->subdev.device; in gf100_vmm_invalidate()
191 struct nvkm_mmu_pt *pd = vmm->pd->pt[0]; in gf100_vmm_invalidate()
194 mutex_lock(&vmm->mmu->mutex); in gf100_vmm_invalidate()
212 addr |= (vmm->pd->pt[0]->addr >> 12) << 4; in gf100_vmm_invalidate()
214 vmm->func->invalidate_pdb(vmm, addr); in gf100_vmm_invalidate()
224 mutex_unlock(&vmm->mmu->mutex); in gf100_vmm_invalidate()
228 gf100_vmm_flush(struct nvkm_vmm *vmm, int depth) in gf100_vmm_flush() argument
231 if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) in gf100_vmm_flush()
233 gf100_vmm_invalidate(vmm, type); in gf100_vmm_flush()
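Lines 181-233 cover invalidation: gf100_vmm_flush() picks an invalidation type (checking whether the BAR subdev holds a reference on this VMM, line 231) and calls gf100_vmm_invalidate(), which serializes the flush behind vmm->mmu->mutex (lines 194/224) and packs the top-level page directory's address as (addr >> 12) << 4 (line 212) before handing it to the per-generation invalidate_pdb() hook. A hedged sketch of that shape, using a pthread mutex and stub register writes; the register offsets and the meaning of the type bits are placeholders, and the hardware handshake between these steps is omitted. Only the locking and the address packing come from the listing.

    #include <stdint.h>
    #include <pthread.h>

    /* Stub for a privileged register write; stands in for the real nvkm
     * register accessors, which are not shown in the listing. */
    static void priv_wr32(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

    struct mmu_model {
        pthread_mutex_t mutex;     /* one flush at a time, as with vmm->mmu->mutex */
    };

    struct vmm_model {
        struct mmu_model *mmu;
        uint64_t pd_addr;          /* bus address of the top-level page directory */
    };

    /* Analogue of gf100_vmm_invalidate(): serialize, point the hardware at the
     * page directory ((addr >> 12) << 4, as on line 212), then trigger the
     * flush.  Register numbers 0x1000/0x1004 are placeholders, not real
     * offsets. */
    static void vmm_invalidate(struct vmm_model *vmm, uint32_t type)
    {
        pthread_mutex_lock(&vmm->mmu->mutex);

        uint64_t addr = (vmm->pd_addr >> 12) << 4;   /* PDB field packing (shown) */
        priv_wr32(0x1000, (uint32_t)addr);           /* hypothetical PDB register */
        priv_wr32(0x1004, type);                     /* hypothetical trigger/type */

        pthread_mutex_unlock(&vmm->mmu->mutex);
    }

The mutex lives in the shared MMU object rather than in the per-VMM structure, presumably because all address spaces flush through the same set of MMU registers.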
237 gf100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc, in gf100_vmm_valid() argument
247 struct nvkm_device *device = vmm->mmu->subdev.device; in gf100_vmm_valid()
268 VMM_DEBUG(vmm, "args"); in gf100_vmm_valid()
272 aper = vmm->func->aper(target); in gf100_vmm_valid()
276 kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); in gf100_vmm_valid()
278 VMM_DEBUG(vmm, "kind %02x", kind); in gf100_vmm_valid()
286 VMM_DEBUG(vmm, "comp %d %02x", aper, page->type); in gf100_vmm_valid()
295 VMM_DEBUG(vmm, "comp %d", ret); in gf100_vmm_valid()
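gf100_vmm_valid() (lines 237-295) validates a map request before anything is written: the requested target is converted to an aperture through vmm->func->aper(), the storage kind is looked up in the table returned by mmu->func->kind(), and compression is cross-checked against the page type, with VMM_DEBUG() naming whichever check failed. A hedged sketch of just the kind check; the shape of the kind table and the "invalid" sentinel are assumptions.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical kind table: kindm[kind] gives the canonical kind, and
     * kind_inv marks entries the MMU cannot express at all. */
    static int validate_kind(const uint8_t *kindm, int kindn, uint8_t kind_inv,
                             uint8_t kind)
    {
        if (kind >= kindn || kindm[kind] == kind_inv) {
            fprintf(stderr, "kind %02x rejected\n", (unsigned)kind);  /* cf. VMM_DEBUG("kind %02x", kind) */
            return -1;
        }
        return 0;
    }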
336 gf100_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) in gf100_vmm_part() argument
342 gf100_vmm_join_(struct nvkm_vmm *vmm, struct nvkm_memory *inst, u64 base) in gf100_vmm_join_() argument
344 struct nvkm_mmu_pt *pd = vmm->pd->pt[0]; in gf100_vmm_join_()
360 nvkm_wo64(inst, 0x0208, vmm->limit - 1); in gf100_vmm_join_()
366 gf100_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst) in gf100_vmm_join() argument
368 return gf100_vmm_join_(vmm, inst, 0); in gf100_vmm_join()
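Finally, gf100_vmm_part()/gf100_vmm_join_() (lines 336-368) detach and attach the VMM to an instance block: join_ takes the top-level page directory (vmm->pd->pt[0]) and writes the address-space limit minus one at instance offset 0x0208, and gf100_vmm_join() is simply the base == 0 case. A sketch of that setup against a byte-array model of the instance block; only the 0x0208 limit write appears in the listing, so the 0x0200 page-directory write below is an assumption.

    #include <stdint.h>
    #include <string.h>

    /* Model of an instance block as raw bytes with 64-bit little-endian
     * fields, standing in for nvkm_wo64() on a struct nvkm_memory.  The
     * caller must supply a buffer of at least 0x210 bytes. */
    static void inst_wo64(uint8_t *inst, uint32_t offset, uint64_t val)
    {
        memcpy(inst + offset, &val, sizeof(val));   /* assumes little-endian host */
    }

    /* Analogue of gf100_vmm_join_(): point the instance block at the page
     * directory and record the address-space limit.  Only the 0x0208 write
     * is visible in the listing; 0x0200 is an assumed offset. */
    static void vmm_join(uint8_t *inst, uint64_t pd_addr, uint64_t vmm_limit,
                         uint64_t base)
    {
        inst_wo64(inst, 0x0200, base | pd_addr);    /* assumed PD pointer field */
        inst_wo64(inst, 0x0208, vmm_limit - 1);     /* shown: limit - 1 at 0x0208 */
    }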