Lines Matching full:vmm

42 return nvkm_vmm_ref(nvkm_uvmm(object)->vmm); in nvkm_uvmm_search()
51 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_pfnclr() local
61 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw) in nvkm_uvmm_mthd_pfnclr()
65 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_pfnclr()
66 ret = nvkm_vmm_pfn_unmap(vmm, addr, size); in nvkm_uvmm_mthd_pfnclr()
67 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_pfnclr()
79 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_pfnmap() local
94 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw) in nvkm_uvmm_mthd_pfnmap()
98 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_pfnmap()
99 ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys); in nvkm_uvmm_mthd_pfnmap()
100 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_pfnmap()
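Both PFN methods above share one shape: the request is rejected when it targets a raw-managed window, and the locked helper runs under vmm->mutex.vmm. A minimal sketch of that shared pattern, reconstructed only from the matched lines; the wrapper name is hypothetical, and the argument unpacking, the errno, and the surrounding nvkm headers are assumptions not shown in the matches:

    /* Sketch of the guard-and-lock shape in nvkm_uvmm_mthd_pfnclr(); addr/size
     * would come from the method's nvif arguments, which the matches omit.
     */
    static int
    uvmm_pfnclr_sketch(struct nvkm_vmm *vmm, u64 addr, u64 size)
    {
            int ret;

            /* Ranges handed over to the raw interface are off-limits here. */
            if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw)
                    return -EINVAL;         /* assumed errno */

            mutex_lock(&vmm->mutex.vmm);
            ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
            mutex_unlock(&vmm->mutex.vmm);

            return ret;
    }

nvkm_uvmm_mthd_pfnmap() wraps nvkm_vmm_pfn_map(vmm, page, addr, size, phys) in the same lock, as the match at line 99 shows.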
112 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_unmap() local
122 if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw) in nvkm_uvmm_mthd_unmap()
125 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_unmap()
126 vma = nvkm_vmm_node_search(vmm, addr); in nvkm_uvmm_mthd_unmap()
128 VMM_DEBUG(vmm, "lookup %016llx: %016llx", in nvkm_uvmm_mthd_unmap()
134 VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); in nvkm_uvmm_mthd_unmap()
139 VMM_DEBUG(vmm, "unmapped"); in nvkm_uvmm_mthd_unmap()
143 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_uvmm_mthd_unmap()
146 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_unmap()
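nvkm_uvmm_mthd_unmap() resolves the region under the same vmm->mutex.vmm and refuses to act on a missing, busy, or already-unmapped VMA. A condensed sketch of that flow as it reads off the matched lines; the wrapper name is hypothetical, the errno values and the debug arguments that continue past the matched lines are assumptions, and the nvif argument decoding is omitted:

    static int
    uvmm_unmap_sketch(struct nvkm_vmm *vmm, u64 addr)
    {
            struct nvkm_vma *vma;
            int ret;

            /* Addresses inside a raw-managed window must use the RAW methods. */
            if (nvkm_vmm_in_managed_range(vmm, addr, 0) && vmm->managed.raw)
                    return -EINVAL;                 /* assumed errno */

            mutex_lock(&vmm->mutex.vmm);
            vma = nvkm_vmm_node_search(vmm, addr);
            if (ret = -ENOENT, !vma || vma->addr != addr) {
                    VMM_DEBUG(vmm, "lookup %016llx: %016llx", addr,
                              vma ? vma->addr : ~0ULL);
                    goto done;
            }
            if (ret = -ENOENT, vma->busy) {
                    VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                    goto done;
            }
            if (ret = -EINVAL, !vma->memory) {
                    VMM_DEBUG(vmm, "unmapped");     /* nothing mapped at this address */
                    goto done;
            }

            nvkm_vmm_unmap_locked(vmm, vma, false);
            ret = 0;
    done:
            mutex_unlock(&vmm->mutex.vmm);
            return ret;
    }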
158 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_map() local
171 if (nvkm_vmm_in_managed_range(vmm, addr, size) && vmm->managed.raw) in nvkm_uvmm_mthd_map()
176 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); in nvkm_uvmm_mthd_map()
180 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_map()
181 if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) { in nvkm_uvmm_mthd_map()
182 VMM_DEBUG(vmm, "lookup %016llx", addr); in nvkm_uvmm_mthd_map()
187 VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); in nvkm_uvmm_mthd_map()
192 VMM_DEBUG(vmm, "pfnmap %016llx", addr); in nvkm_uvmm_mthd_map()
199 VMM_DEBUG(vmm, "split %d %d %d " in nvkm_uvmm_mthd_map()
206 vma = nvkm_vmm_node_split(vmm, vma, addr, size); in nvkm_uvmm_mthd_map()
213 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_map()
215 ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc); in nvkm_uvmm_mthd_map()
222 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_map()
224 nvkm_vmm_unmap_region(vmm, vma); in nvkm_uvmm_mthd_map()
226 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_map()
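The map method is the most involved classic path: it looks up and, if necessary, splits the target VMA under vmm->mutex.vmm, then drops the lock before calling nvkm_memory_map() and re-takes it only to unwind with nvkm_vmm_unmap_region() on failure. A heavily condensed sketch of that ordering based on the matched lines; the wrapper name is hypothetical, the pfnmap/split validation and memory-handle lookup are omitted, and the errnos and the busy-flag handling are assumptions:

    static int
    uvmm_map_sketch(struct nvkm_vmm *vmm, struct nvkm_memory *memory,
                    u64 addr, u64 size, u64 offset, void *argv, u32 argc)
    {
            struct nvkm_vma *vma;
            int ret;

            mutex_lock(&vmm->mutex.vmm);
            if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
                    VMM_DEBUG(vmm, "lookup %016llx", addr);
                    goto fail;
            }
            if (ret = -ENOENT, vma->busy) {
                    VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy);
                    goto fail;
            }

            /* Carve the requested window out of a larger region if needed. */
            if (addr != vma->addr || size != vma->size) {
                    vma = nvkm_vmm_node_split(vmm, vma, addr, size);
                    if (ret = -ENOMEM, !vma)        /* assumed errno */
                            goto fail;
            }

            vma->busy = true;       /* assumption: reserves the vma across the unlocked map */
            mutex_unlock(&vmm->mutex.vmm);

            ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
            if (ret) {
                    /* Re-take the lock only to undo the reservation. */
                    mutex_lock(&vmm->mutex.vmm);
                    vma->busy = false;
                    nvkm_vmm_unmap_region(vmm, vma);
                    mutex_unlock(&vmm->mutex.vmm);
            }
            return ret;

    fail:
            mutex_unlock(&vmm->mutex.vmm);
            return ret;
    }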
237 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_put() local
247 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_put()
248 vma = nvkm_vmm_node_search(vmm, args->v0.addr); in nvkm_uvmm_mthd_put()
250 VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr, in nvkm_uvmm_mthd_put()
256 VMM_DEBUG(vmm, "denied %016llx: %d", addr, vma->busy); in nvkm_uvmm_mthd_put()
260 nvkm_vmm_put_locked(vmm, vma); in nvkm_uvmm_mthd_put()
263 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_put()
273 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_get() local
290 mutex_lock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_get()
291 ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse, in nvkm_uvmm_mthd_get()
293 mutex_unlock(&vmm->mutex.vmm); in nvkm_uvmm_mthd_get()
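PUT and GET are thin wrappers around the locked helpers: take vmm->mutex.vmm, call nvkm_vmm_put_locked()/nvkm_vmm_get_locked(), unlock. Sketches of that wrapping; the names are hypothetical, PUT's lookup/busy checks are reduced to a comment, and the GET arguments that run past the matched line are assumptions:

    static void
    uvmm_put_sketch(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
    {
            mutex_lock(&vmm->mutex.vmm);
            /* The in-tree method first re-finds the vma and rejects busy ones. */
            nvkm_vmm_put_locked(vmm, vma);
            mutex_unlock(&vmm->mutex.vmm);
    }

    static int
    uvmm_get_sketch(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
                    u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
    {
            int ret;

            mutex_lock(&vmm->mutex.vmm);
            ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
                                      shift, align, size, pvma);
            mutex_unlock(&vmm->mutex.vmm);
            return ret;
    }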
311 page = uvmm->vmm->func->page; in nvkm_uvmm_mthd_page()
332 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_page_index() local
336 for (page = vmm->func->page; page->shift; page++) { in nvkm_uvmm_page_index()
342 VMM_DEBUG(vmm, "page %d %016llx", shift, size); in nvkm_uvmm_page_index()
348 *refd = page - vmm->func->page; in nvkm_uvmm_page_index()
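nvkm_uvmm_page_index() turns a page shift supplied by userspace into an index into the vmm's page table; the index is simply the matching entry's offset from vmm->func->page, and it is this index the RAW methods later pass around as refd. A sketch of the lookup; the wrapper name is hypothetical and the zero-shift/alignment validation outside the matched lines is an assumption:

    static int
    uvmm_page_index_sketch(struct nvkm_vmm *vmm, u64 size, u8 shift, u8 *refd)
    {
            const struct nvkm_vmm_page *page;

            if (!shift)
                    return -EINVAL;                 /* assumed check */

            /* Walk the per-vmm page-size table until the shift matches. */
            for (page = vmm->func->page; page->shift; page++) {
                    if (page->shift == shift)
                            break;
            }

            if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
                    VMM_DEBUG(vmm, "page %d %016llx", shift, size);
                    return -EINVAL;
            }

            *refd = page - vmm->func->page;         /* array index doubles as the id */
            return 0;
    }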
356 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_raw_get() local
360 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size)) in nvkm_uvmm_mthd_raw_get()
367 return nvkm_vmm_raw_get(vmm, args->addr, args->size, refd); in nvkm_uvmm_mthd_raw_get()
373 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_raw_put() local
377 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size)) in nvkm_uvmm_mthd_raw_put()
384 nvkm_vmm_raw_put(vmm, args->addr, args->size, refd); in nvkm_uvmm_mthd_raw_put()
393 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_raw_map() local
408 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size)) in nvkm_uvmm_mthd_raw_map()
419 VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); in nvkm_uvmm_mthd_raw_map()
423 ret = nvkm_memory_map(memory, args->offset, vmm, &vma, argv, argc); in nvkm_uvmm_mthd_raw_map()
433 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_raw_unmap() local
437 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size)) in nvkm_uvmm_mthd_raw_unmap()
444 nvkm_vmm_raw_unmap(vmm, args->addr, args->size, in nvkm_uvmm_mthd_raw_unmap()
453 struct nvkm_vmm *vmm = uvmm->vmm; in nvkm_uvmm_mthd_raw_sparse() local
455 if (!nvkm_vmm_in_managed_range(vmm, args->addr, args->size)) in nvkm_uvmm_mthd_raw_sparse()
458 return nvkm_vmm_raw_sparse(vmm, args->addr, args->size, args->ref); in nvkm_uvmm_mthd_raw_sparse()
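Note the inverted guard on the RAW methods: where the classic methods above reject addresses inside a raw-managed window, every nvkm_uvmm_mthd_raw_*() helper requires the range to lie within one. The sparse case shows the pattern at its smallest; a sketch whose body mirrors the matched lines (the wrapper name and the errno are assumptions):

    static int
    uvmm_raw_sparse_sketch(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
    {
            /* RAW methods only operate inside the userspace-managed window. */
            if (!nvkm_vmm_in_managed_range(vmm, addr, size))
                    return -EINVAL;                 /* assumed errno */

            return nvkm_vmm_raw_sparse(vmm, addr, size, ref);
    }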
469 if (!uvmm->vmm->managed.raw) in nvkm_uvmm_mthd_raw()
505 if (uvmm->vmm->func->mthd) { in nvkm_uvmm_mthd()
506 return uvmm->vmm->func->mthd(uvmm->vmm, in nvkm_uvmm_mthd()
521 nvkm_vmm_unref(&uvmm->vmm); in nvkm_uvmm_dtor()
560 if (!mmu->vmm) { in nvkm_uvmm_new()
561 ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size, in nvkm_uvmm_new()
562 argv, argc, NULL, "user", &uvmm->vmm); in nvkm_uvmm_new()
566 uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug); in nvkm_uvmm_new()
571 uvmm->vmm = nvkm_vmm_ref(mmu->vmm); in nvkm_uvmm_new()
573 uvmm->vmm->managed.raw = raw; in nvkm_uvmm_new()
576 ret = mmu->func->promote_vmm(uvmm->vmm); in nvkm_uvmm_new()
581 page = uvmm->vmm->func->page; in nvkm_uvmm_new()
585 args->v0.addr = uvmm->vmm->start; in nvkm_uvmm_new()
586 args->v0.size = uvmm->vmm->limit; in nvkm_uvmm_new()
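On the constructor side, the matches show the two ways a uvmm obtains its vmm: a fresh per-client vmm built through mmu->func->vmm.ctor() when the mmu has no global one, or a reference on the shared mmu->vmm otherwise; the raw flag is then recorded in vmm->managed.raw, the vmm may be promoted, and the usable address window is reported back through the class arguments. A rough sketch of that decision; the wrapper name and the args union stand-in are hypothetical, the page-count reporting around line 581 is omitted, and the promote condition plus error handling outside the matched lines are assumptions:

    union uvmm_new_args {   /* stand-in for the ctor's anonymous argument union */
            struct nvif_vmm_v0 v0;
    };

    static int
    uvmm_new_vmm_sketch(struct nvkm_mmu *mmu, struct nvkm_uvmm *uvmm,
                        const struct nvkm_oclass *oclass, bool managed, bool raw,
                        u64 addr, u64 size, void *argv, u32 argc,
                        union uvmm_new_args *args)
    {
            int ret;

            if (!mmu->vmm) {
                    /* No mmu-wide vmm: build a private one for this client. */
                    ret = mmu->func->vmm.ctor(mmu, managed || raw, addr, size,
                                              argv, argc, NULL, "user", &uvmm->vmm);
                    if (ret)
                            return ret;

                    uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
            } else {
                    /* Otherwise share the mmu-wide vmm. */
                    uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
            }
            uvmm->vmm->managed.raw = raw;

            if (mmu->func->promote_vmm) {           /* assumed condition */
                    ret = mmu->func->promote_vmm(uvmm->vmm);
                    if (ret)
                            return ret;
            }

            /* Report the usable VA window back to userspace. */
            args->v0.addr = uvmm->vmm->start;
            args->v0.size = uvmm->vmm->limit;
            return 0;
    }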