Lines matching defs:uvmm (from drivers/gpu/drm/nouveau/nouveau_uvmm.c; the leading number on each line is the line number in that file)

6  * The uvmm mutex protects any operations on the GPU VA space provided by the
91 nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
94 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
100 nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
103 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
109 nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
112 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
118 nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
121 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
127 nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
130 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
136 nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
141 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
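
The six helpers above (file lines 91..141) all follow the same shape: resolve the raw nvif VMM from &uvmm->vmm.vmm and forward the address/range to the corresponding low-level nvif call. A minimal sketch of that shape, using the sparse-ref wrapper as the example; the nvif_vmm_raw_sparse() call and its (vmm, addr, range, ref) signature are an assumption inferred from the wrapper names, not something confirmed by the lines above:

static int
nouveau_uvmm_vmm_sparse_ref_sketch(struct nouveau_uvmm *uvmm,
                                   u64 addr, u64 range)
{
        struct nvif_vmm *vmm = &uvmm->vmm.vmm;

        /* Assumed nvif raw-VMM call; the other wrappers would forward to
         * their nvif_vmm_raw_{get,put,map,unmap}() counterparts. */
        return nvif_vmm_raw_sparse(vmm, addr, range, true);
}
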
177 return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
276 __nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
282 MA_STATE(mas, &uvmm->region_mt, addr, addr);
295 reg->uvmm = uvmm;
301 nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
307 reg->uvmm = uvmm;
311 ret = __nouveau_uvma_region_insert(uvmm, reg);
321 struct nouveau_uvmm *uvmm = reg->uvmm;
322 MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0);
328 nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
334 if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
341 ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
345 ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
359 nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
362 MA_STATE(mas, &uvmm->region_mt, addr, 0);
368 nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
373 reg = nouveau_uvma_region_find_first(uvmm, addr, range);
387 struct nouveau_uvmm *uvmm = reg->uvmm;
389 return drm_gpuvm_interval_empty(&uvmm->base,
397 struct nouveau_uvmm *uvmm = reg->uvmm;
405 nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
412 nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
417 reg = nouveau_uvma_region_find(uvmm, addr, range);
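
Regions live in uvmm->region_mt and double as sparse reservations: creation checks that the GPU VA interval is still empty, inserts the region into the maple tree and takes a sparse reference; destruction looks the region up again and reverses that. A hypothetical caller-side sketch, assuming the (uvmm, addr, range) signatures visible in the call sites above and that, as in the submit path further down (lines 1237..1242), the caller holds the uvmm mutex around these file-local helpers:

/* Hypothetical example; example_sparse_region() is not part of the driver,
 * and nouveau_uvma_region_create/destroy are static to nouveau_uvmm.c, so
 * this only illustrates the in-file usage. */
static int example_sparse_region(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
        int ret;

        nouveau_uvmm_lock(uvmm);
        /* Fails if the interval is already populated, cf. the
         * drm_gpuvm_interval_empty() check at file line 334. */
        ret = nouveau_uvma_region_create(uvmm, addr, range);
        nouveau_uvmm_unlock(uvmm);
        if (ret)
                return ret;

        /* ... map/unmap within the sparse region ... */

        nouveau_uvmm_lock(uvmm);
        ret = nouveau_uvma_region_destroy(uvmm, addr, range);
        nouveau_uvmm_unlock(uvmm);

        return ret;
}
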
454 nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
503 nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
540 nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
556 nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
567 nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
571 nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
577 nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL);
581 op_map_prepare(struct nouveau_uvmm *uvmm,
596 drm_gpuva_map(&uvmm->base, &uvma->va, op);
615 nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
630 ret = op_map_prepare(uvmm, &new->map, &op->map, args);
635 ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
659 ret = op_map_prepare(uvmm, &new->prev, r->prev,
669 ret = op_map_prepare(uvmm, &new->next, r->next,
706 ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
727 nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops,
734 nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
747 return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
751 nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
755 return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
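
nouveau_uvmm_sm_prepare() (file lines 615..727) backs both wrappers: the map variant passes an args struct describing the new mapping, the unmap variant passes NULL. It walks the drm_gpuva_ops list produced by the GPUVM split/merge helpers, pre-allocating uvmas and reserving page-table ranges with nouveau_uvmm_vmm_get(), and on failure replays what it already did via the _unwind() helpers above. A simplified sketch of that loop's shape; only the drm_gpuva_for_each_op() walk and the op types are spelled out, the per-case work is condensed to comments:

static int sm_prepare_sketch(struct nouveau_uvmm *uvmm,
                             struct drm_gpuva_ops *ops)
{
        struct drm_gpuva_op *op;
        int ret = 0;

        drm_gpuva_for_each_op(op, ops) {
                switch (op->op) {
                case DRM_GPUVA_OP_MAP:
                        /* op_map_prepare() + nouveau_uvmm_vmm_get() for the
                         * part of the span not yet backed by page tables. */
                        break;
                case DRM_GPUVA_OP_REMAP:
                        /* op_map_prepare() for the prev/next remnants of the
                         * split mapping. */
                        break;
                case DRM_GPUVA_OP_UNMAP:
                        /* Nothing to reserve here; page tables are put in the
                         * cleanup stage. */
                        break;
                default:
                        ret = -EINVAL;
                        goto unwind;
                }
        }

        return 0;

unwind:
        /* nouveau_uvmm_sm_prepare_unwind() walks the ops handled so far and
         * releases what was reserved (cf. file lines 454..577). */
        return ret;
}
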
827 nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
853 nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
857 return nouveau_uvmm_sm(uvmm, new, ops);
861 nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
865 return nouveau_uvmm_sm(uvmm, new, ops);
869 nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
896 nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
922 nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
926 nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
930 nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
934 nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
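
The sm() step performs the actual (un)maps on the GPU for a previously prepared ops list, and the cleanup step afterwards frees what the operation made redundant. The two cleanup wrappers only differ in the final flag of nouveau_uvmm_sm_cleanup(); judging by the vmm_put() call at file line 896, that flag selects whether the cleanup also puts the page-table ranges of unmapped spans. A sketch of that pairing, with bodies inferred from lines 922..934; the nouveau_uvma_prealloc type for "new" is an assumption inferred from the map/prev/next fields used around lines 630..669:

static void sm_map_cleanup_sketch(struct nouveau_uvmm *uvmm,
                                  struct nouveau_uvma_prealloc *new,
                                  struct drm_gpuva_ops *ops)
{
        /* false: keep the page tables, the new mapping still uses them. */
        nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
}

static void sm_unmap_cleanup_sketch(struct nouveau_uvmm *uvmm,
                                    struct nouveau_uvma_prealloc *new,
                                    struct drm_gpuva_ops *ops)
{
        /* true: the spans are gone, also put their page-table ranges. */
        nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
}
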
938 nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
946 if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
996 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1010 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
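
Each requested bind op is range-checked up front (file line 1010) before any VA-space state is touched. A minimal sketch of what such a check presumably does; the page-alignment test is an assumption, only the drm_gpuvm_range_valid() check against the managed VA space is confirmed by line 946:

static int validate_range_sketch(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
        /* Assumption: GPU VA addresses and sizes must be page aligned. */
        if ((addr | range) & ~PAGE_MASK)
                return -EINVAL;

        /* Rejects overflowing ranges and ranges that fall outside the VA
         * space managed by the GPUVM (incl. its kernel-reserved node). */
        if (!drm_gpuvm_range_valid(&uvmm->base, addr, range))
                return -EINVAL;

        return 0;
}
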
1048 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1054 nouveau_uvmm_lock(uvmm);
1055 reg = nouveau_uvma_region_find_first(uvmm, addr, range);
1057 nouveau_uvmm_unlock(uvmm);
1066 nouveau_uvmm_unlock(uvmm);
1071 nouveau_uvmm_unlock(uvmm);
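
The lookup at file lines 1054..1071 shows the locking rule from the comment at line 6 in action: any access to region_mt or the GPU VA space happens with the uvmm mutex held, and every exit path drops it. A small, purely hypothetical helper using that same pattern (range_has_region() does not exist in the driver):

static bool range_has_region(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
{
        struct nouveau_uvma_region *reg;

        nouveau_uvmm_lock(uvmm);
        reg = nouveau_uvma_region_find_first(uvmm, addr, range);
        nouveau_uvmm_unlock(uvmm);

        return reg != NULL;
}
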
1195 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1210 op->vm_bo = drm_gpuvm_bo_obtain(&uvmm->base, obj);
1233 * uvmm lock until we can't fail anymore. This is due to the set of GPU
1237 nouveau_uvmm_lock(uvmm);
1242 ret = nouveau_uvma_region_create(uvmm,
1250 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr,
1257 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
1265 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
1268 drm_gpuva_ops_free(&uvmm->base, op->ops);
1280 reg = nouveau_uvma_region_find_first(uvmm,
1303 op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
1313 ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new,
1319 drm_gpuva_ops_free(&uvmm->base, op->ops);
1327 op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
1335 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
1338 drm_gpuva_ops_free(&uvmm->base, op->ops);
1391 nouveau_uvmm_unlock(uvmm);
1401 nouveau_uvma_region_destroy(uvmm, op->va.addr,
1405 __nouveau_uvma_region_insert(uvmm, op->reg);
1406 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
1410 nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new,
1416 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
1421 drm_gpuva_ops_free(&uvmm->base, op->ops);
1426 nouveau_uvmm_unlock(uvmm);
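
The submit/prepare stage (file lines 1195..1426) is where everything that can fail happens: with the uvmm lock held across the whole list of bind ops it creates or looks up regions, asks the GPUVM core for split/merge ops, and runs the *_prepare() helpers; if anything fails, the ops already handled are unwound (lines 1401..1421) before the lock is dropped. A heavily condensed sketch of that shape for a single unmap op; in the real function the lock spans all ops of the job and the unwind also covers region create/insert:

static int bind_unmap_prepare_sketch(struct nouveau_uvmm *uvmm,
                                     struct nouveau_uvma_prealloc *new,
                                     struct drm_gpuva_ops **pops,
                                     u64 addr, u64 range)
{
        struct drm_gpuva_ops *ops;
        int ret = 0;

        nouveau_uvmm_lock(uvmm);

        /* Ask the GPUVM core which existing mappings must be split/removed. */
        ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base, addr, range);
        if (IS_ERR(ops)) {
                ret = PTR_ERR(ops);
                goto out_unlock;
        }

        /* Pre-allocate the uvma remnants this unmap will leave behind. */
        ret = nouveau_uvmm_sm_unmap_prepare(uvmm, new, ops);
        if (ret) {
                drm_gpuva_ops_free(&uvmm->base, ops);
                goto out_unlock;
        }

        *pops = ops;
out_unlock:
        nouveau_uvmm_unlock(uvmm);
        return ret;
}
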
1444 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1454 ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
1461 ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
1478 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1493 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
1498 nouveau_uvmm_lock(uvmm);
1500 nouveau_uvmm_unlock(uvmm);
1508 nouveau_uvmm_sm_map_cleanup(uvmm, &op->new,
1513 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
1519 drm_gpuva_ops_free(&uvmm->base, op->ops);
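
Together, file lines 1444..1519 show the remaining two job callbacks: run commits the prepared ops to the GPU via nouveau_uvmm_sm_map()/nouveau_uvmm_sm_unmap(), and the armed-job cleanup later calls the matching *_cleanup() helper and frees the ops list with drm_gpuva_ops_free(). A condensed sketch of the run-side dispatch; the op tag enum is a placeholder, the driver uses its own internal bind-op types:

enum sketch_bind_op { SKETCH_OP_MAP, SKETCH_OP_UNMAP };   /* placeholder tags */

static int bind_run_sketch(struct nouveau_uvmm *uvmm, enum sketch_bind_op kind,
                           struct nouveau_uvma_prealloc *new,
                           struct drm_gpuva_ops *ops)
{
        switch (kind) {
        case SKETCH_OP_MAP:
                return nouveau_uvmm_sm_map(uvmm, new, ops);
        case SKETCH_OP_UNMAP:
                return nouveau_uvmm_sm_unmap(uvmm, new, ops);
        default:
                return -EINVAL;
        }
        /* Cleanup (not shown): nouveau_uvmm_sm_{map,unmap}_cleanup(), then
         * drm_gpuva_ops_free(&uvmm->base, ops) once the job has finished. */
}
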
1796 struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
1798 kfree(uvmm);
1820 struct nouveau_uvmm *uvmm;
1838 if (unlikely(cli->uvmm.disabled)) {
1843 uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL);
1844 if (!uvmm) {
1851 kfree(uvmm);
1856 mutex_init(&uvmm->mutex);
1857 mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
1858 mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
1860 drm_gpuvm_init(&uvmm->base, cli->name, 0, drm, r_obj,
1869 ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
1873 NULL, 0, &uvmm->vmm.vmm);
1877 uvmm->vmm.cli = cli;
1878 cli->uvmm.ptr = uvmm;
1884 drm_gpuvm_put(&uvmm->base);
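
Initialization (file lines 1820..1884) wires up three things in order: the uvmm mutex plus a maple tree whose external lock is that same mutex, the DRM GPUVM base via drm_gpuvm_init(), and the raw nvif VMM via nvif_vmm_ctor(); on nvif failure the GPUVM reference is dropped again (line 1884). The maple-tree-with-external-lock part is generic enough to show in isolation; the struct below is a stand-in, the mt_* calls are exactly the ones visible above:

#include <linux/maple_tree.h>
#include <linux/mutex.h>

struct uvmm_like {                       /* stand-in for struct nouveau_uvmm */
        struct mutex mutex;
        struct maple_tree region_mt;
};

static void uvmm_like_init(struct uvmm_like *u)
{
        mutex_init(&u->mutex);

        /* MT_FLAGS_LOCK_EXTERN: the tree will not use its own spinlock;
         * lockdep is told the external mutex protects it instead, so all
         * region_mt accesses must be done with u->mutex held (cf. line 6). */
        mt_init_flags(&u->region_mt, MT_FLAGS_LOCK_EXTERN);
        mt_set_external_lock(&u->region_mt, &u->mutex);
}
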
1891 nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
1893 MA_STATE(mas, &uvmm->region_mt, 0, 0);
1895 struct nouveau_cli *cli = uvmm->vmm.cli;
1898 nouveau_uvmm_lock(uvmm);
1899 drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
1903 if (unlikely(va == &uvmm->base.kernel_alloc_node))
1925 WARN(!mtree_empty(&uvmm->region_mt),
1927 __mt_destroy(&uvmm->region_mt);
1928 nouveau_uvmm_unlock(uvmm);
1931 nouveau_vmm_fini(&uvmm->vmm);
1932 drm_gpuvm_put(&uvmm->base);
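
Teardown (file lines 1891..1932) runs in reverse order of init: with the uvmm lock held it walks and removes every remaining VA (skipping the GPUVM's internal kernel_alloc_node), drops leftover regions from the maple tree (the MA_STATE at line 1893), sanity-checks that the tree is then empty, and destroys it; only after unlocking are the nvif-backed VMM and the GPUVM reference put. A sketch of that walk; the per-VA and per-region teardown is condensed to comments since those helpers are not part of the listing:

static void uvmm_fini_sketch(struct nouveau_uvmm *uvmm)
{
        struct drm_gpuva *va, *next;

        nouveau_uvmm_lock(uvmm);
        drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
                /* Skip the GPUVM's reserved kernel range (cf. line 1903). */
                if (va == &uvmm->base.kernel_alloc_node)
                        continue;

                /* ... unmap on the GPU, unlink/remove the drm_gpuva and
                 *     free the containing nouveau_uvma ... */
        }

        /* ... erase and drop any remaining regions from region_mt ... */

        WARN(!mtree_empty(&uvmm->region_mt),
             "region tree not empty on fini");       /* message is a stand-in */
        __mt_destroy(&uvmm->region_mt);
        nouveau_uvmm_unlock(uvmm);

        nouveau_vmm_fini(&uvmm->vmm);
        drm_gpuvm_put(&uvmm->base);
}
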