Lines matching the full identifier vmm
23 #include "vmm.h"
75 struct nvkm_vmm *vmm; member
113 VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
129 if (it->vmm->func->flush) { in nvkm_vmm_flush()
131 it->vmm->func->flush(it->vmm, it->flush); in nvkm_vmm_flush()
145 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_unref_pdes() local
159 func->sparse(vmm, pgd->pt[0], pdei, 1); in nvkm_vmm_unref_pdes()
162 func->unmap(vmm, pgd->pt[0], pdei, 1); in nvkm_vmm_unref_pdes()
170 func->pde(vmm, pgd, pdei); in nvkm_vmm_unref_pdes()
177 func->pde(vmm, pgd, pdei); in nvkm_vmm_unref_pdes()
190 nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt); in nvkm_vmm_unref_pdes()
203 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_unref_sptes() local
244 pair->func->sparse(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_unref_sptes()
252 pair->func->invalid(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_unref_sptes()
267 dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes); in nvkm_vmm_unref_ptes()
272 desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes); in nvkm_vmm_unref_ptes()
302 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_ref_sptes() local
348 desc->func->sparse(vmm, pgt->pt[1], spti, sptc); in nvkm_vmm_ref_sptes()
351 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_ref_sptes()
358 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes); in nvkm_vmm_ref_sptes()
419 struct nvkm_vmm *vmm = it->vmm; in nvkm_vmm_ref_hwpt() local
420 struct nvkm_mmu *mmu = vmm->mmu; in nvkm_vmm_ref_hwpt()
457 desc->func->sparse(vmm, pt, pteb, ptes); in nvkm_vmm_ref_hwpt()
459 desc->func->invalid(vmm, pt, pteb, ptes); in nvkm_vmm_ref_hwpt()
462 desc->func->unmap(vmm, pt, pteb, ptes); in nvkm_vmm_ref_hwpt()
470 desc->func->sparse(vmm, pt, 0, pten); in nvkm_vmm_ref_hwpt()
472 desc->func->invalid(vmm, pt, 0, pten); in nvkm_vmm_ref_hwpt()
478 it->desc[it->lvl].func->pde(it->vmm, pgd, pdei); in nvkm_vmm_ref_hwpt()
501 nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_iter() argument
513 it.vmm = vmm; in nvkm_vmm_iter()
523 it.pt[it.max] = vmm->pd; in nvkm_vmm_iter()
567 MAP_PTES(vmm, pt, ptei, ptes, map); in nvkm_vmm_iter()
569 CLR_PTES(vmm, pt, ptei, ptes); in nvkm_vmm_iter()
602 nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_sparse_put() argument
605 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false, in nvkm_vmm_ptes_sparse_put()
612 nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_sparse_get() argument
616 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref", in nvkm_vmm_ptes_sparse_get()
621 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size); in nvkm_vmm_ptes_sparse_get()
630 nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref) in nvkm_vmm_ptes_sparse() argument
632 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_ptes_sparse()
662 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block); in nvkm_vmm_ptes_sparse()
665 nvkm_vmm_ptes_sparse(vmm, start, size, false); in nvkm_vmm_ptes_sparse()
669 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block); in nvkm_vmm_ptes_sparse()
680 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_unmap() argument
685 mutex_lock(&vmm->mutex.map); in nvkm_vmm_ptes_unmap()
686 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn, in nvkm_vmm_ptes_unmap()
690 mutex_unlock(&vmm->mutex.map); in nvkm_vmm_ptes_unmap()
694 nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_map() argument
698 mutex_lock(&vmm->mutex.map); in nvkm_vmm_ptes_map()
699 nvkm_vmm_iter(vmm, page, addr, size, "map", false, false, in nvkm_vmm_ptes_map()
701 mutex_unlock(&vmm->mutex.map); in nvkm_vmm_ptes_map()
705 nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_put_locked() argument
708 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false, in nvkm_vmm_ptes_put_locked()
713 nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_put() argument
716 mutex_lock(&vmm->mutex.ref); in nvkm_vmm_ptes_put()
717 nvkm_vmm_ptes_put_locked(vmm, page, addr, size); in nvkm_vmm_ptes_put()
718 mutex_unlock(&vmm->mutex.ref); in nvkm_vmm_ptes_put()
722 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_get() argument
727 mutex_lock(&vmm->mutex.ref); in nvkm_vmm_ptes_get()
728 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false, in nvkm_vmm_ptes_get()
732 nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr); in nvkm_vmm_ptes_get()
733 mutex_unlock(&vmm->mutex.ref); in nvkm_vmm_ptes_get()
736 mutex_unlock(&vmm->mutex.ref); in nvkm_vmm_ptes_get()
741 __nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in __nvkm_vmm_ptes_unmap_put() argument
746 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref", in __nvkm_vmm_ptes_unmap_put()
753 nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_unmap_put() argument
756 if (vmm->managed.raw) { in nvkm_vmm_ptes_unmap_put()
757 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn); in nvkm_vmm_ptes_unmap_put()
758 nvkm_vmm_ptes_put(vmm, page, addr, size); in nvkm_vmm_ptes_unmap_put()
760 __nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn); in nvkm_vmm_ptes_unmap_put()
765 __nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in __nvkm_vmm_ptes_get_map() argument
769 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true, in __nvkm_vmm_ptes_get_map()
773 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false); in __nvkm_vmm_ptes_get_map()
780 nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page, in nvkm_vmm_ptes_get_map() argument
786 if (vmm->managed.raw) { in nvkm_vmm_ptes_get_map()
787 ret = nvkm_vmm_ptes_get(vmm, page, addr, size); in nvkm_vmm_ptes_get_map()
791 nvkm_vmm_ptes_map(vmm, page, addr, size, map, func); in nvkm_vmm_ptes_get_map()
795 return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func); in nvkm_vmm_ptes_get_map()
836 nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_remove() argument
838 rb_erase(&vma->tree, &vmm->free); in nvkm_vmm_free_remove()
842 nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_delete() argument
844 nvkm_vmm_free_remove(vmm, vma); in nvkm_vmm_free_delete()
850 nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_free_insert() argument
852 struct rb_node **ptr = &vmm->free.rb_node; in nvkm_vmm_free_insert()
874 rb_insert_color(&vma->tree, &vmm->free); in nvkm_vmm_free_insert()
878 nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_remove() argument
880 rb_erase(&vma->tree, &vmm->root); in nvkm_vmm_node_remove()
884 nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_delete() argument
886 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_delete()
892 nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_node_insert() argument
894 struct rb_node **ptr = &vmm->root.rb_node; in nvkm_vmm_node_insert()
910 rb_insert_color(&vma->tree, &vmm->root); in nvkm_vmm_node_insert()
914 nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr) in nvkm_vmm_node_search() argument
916 struct rb_node *node = vmm->root.rb_node; in nvkm_vmm_node_search()
930 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
934 nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev, in nvkm_vmm_node_merge() argument
940 nvkm_vmm_node_delete(vmm, next); in nvkm_vmm_node_merge()
943 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
950 nvkm_vmm_node_remove(vmm, next); in nvkm_vmm_node_merge()
954 nvkm_vmm_node_insert(vmm, next); in nvkm_vmm_node_merge()
960 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_node_merge()
964 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_merge()
967 nvkm_vmm_node_delete(vmm, vma); in nvkm_vmm_node_merge()
976 nvkm_vmm_node_split(struct nvkm_vmm *vmm, in nvkm_vmm_node_split() argument
986 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_node_split()
992 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size); in nvkm_vmm_node_split()
996 nvkm_vmm_node_insert(vmm, tmp); in nvkm_vmm_node_split()
1019 nvkm_vmm_dump(struct nvkm_vmm *vmm) in nvkm_vmm_dump() argument
1022 list_for_each_entry(vma, &vmm->list, head) { in nvkm_vmm_dump()
1028 nvkm_vmm_dtor(struct nvkm_vmm *vmm) in nvkm_vmm_dtor() argument
1033 if (vmm->rm.client.gsp) { in nvkm_vmm_dtor()
1034 nvkm_gsp_rm_free(&vmm->rm.object); in nvkm_vmm_dtor()
1035 nvkm_gsp_device_dtor(&vmm->rm.device); in nvkm_vmm_dtor()
1036 nvkm_gsp_client_dtor(&vmm->rm.client); in nvkm_vmm_dtor()
1037 nvkm_vmm_put(vmm, &vmm->rm.rsvd); in nvkm_vmm_dtor()
1041 nvkm_vmm_dump(vmm); in nvkm_vmm_dtor()
1043 while ((node = rb_first(&vmm->root))) { in nvkm_vmm_dtor()
1045 nvkm_vmm_put(vmm, &vma); in nvkm_vmm_dtor()
1048 if (vmm->bootstrapped) { in nvkm_vmm_dtor()
1049 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_dtor()
1050 const u64 limit = vmm->limit - vmm->start; in nvkm_vmm_dtor()
1055 nvkm_mmu_ptc_dump(vmm->mmu); in nvkm_vmm_dtor()
1056 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit); in nvkm_vmm_dtor()
1059 vma = list_first_entry(&vmm->list, typeof(*vma), head); in nvkm_vmm_dtor()
1062 WARN_ON(!list_empty(&vmm->list)); in nvkm_vmm_dtor()
1064 if (vmm->nullp) { in nvkm_vmm_dtor()
1065 dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024, in nvkm_vmm_dtor()
1066 vmm->nullp, vmm->null); in nvkm_vmm_dtor()
1069 if (vmm->pd) { in nvkm_vmm_dtor()
1070 nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]); in nvkm_vmm_dtor()
1071 nvkm_vmm_pt_del(&vmm->pd); in nvkm_vmm_dtor()
1076 nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size) in nvkm_vmm_ctor_managed() argument
1084 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_ctor_managed()
1085 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor_managed()
1093 struct nvkm_vmm *vmm) in nvkm_vmm_ctor() argument
1101 vmm->func = func; in nvkm_vmm_ctor()
1102 vmm->mmu = mmu; in nvkm_vmm_ctor()
1103 vmm->name = name; in nvkm_vmm_ctor()
1104 vmm->debug = mmu->subdev.debug; in nvkm_vmm_ctor()
1105 kref_init(&vmm->kref); in nvkm_vmm_ctor()
1107 __mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key); in nvkm_vmm_ctor()
1108 mutex_init(&vmm->mutex.ref); in nvkm_vmm_ctor()
1109 mutex_init(&vmm->mutex.map); in nvkm_vmm_ctor()
1130 vmm->pd = nvkm_vmm_pt_new(desc, false, NULL); in nvkm_vmm_ctor()
1131 if (!vmm->pd) in nvkm_vmm_ctor()
1133 vmm->pd->refs[0] = 1; in nvkm_vmm_ctor()
1134 INIT_LIST_HEAD(&vmm->join); in nvkm_vmm_ctor()
1141 vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true); in nvkm_vmm_ctor()
1142 if (!vmm->pd->pt[0]) in nvkm_vmm_ctor()
1147 INIT_LIST_HEAD(&vmm->list); in nvkm_vmm_ctor()
1148 vmm->free = RB_ROOT; in nvkm_vmm_ctor()
1149 vmm->root = RB_ROOT; in nvkm_vmm_ctor()
1156 vmm->start = 0; in nvkm_vmm_ctor()
1157 vmm->limit = 1ULL << bits; in nvkm_vmm_ctor()
1158 if (addr + size < addr || addr + size > vmm->limit) in nvkm_vmm_ctor()
1162 if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr))) in nvkm_vmm_ctor()
1165 vmm->managed.p.addr = 0; in nvkm_vmm_ctor()
1166 vmm->managed.p.size = addr; in nvkm_vmm_ctor()
1172 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1173 list_add_tail(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1178 size = vmm->limit - addr; in nvkm_vmm_ctor()
1179 if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size))) in nvkm_vmm_ctor()
1182 vmm->managed.n.addr = addr; in nvkm_vmm_ctor()
1183 vmm->managed.n.size = size; in nvkm_vmm_ctor()
1188 vmm->start = addr; in nvkm_vmm_ctor()
1189 vmm->limit = size ? (addr + size) : (1ULL << bits); in nvkm_vmm_ctor()
1190 if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits)) in nvkm_vmm_ctor()
1193 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start))) in nvkm_vmm_ctor()
1196 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_ctor()
1197 list_add(&vma->head, &vmm->list); in nvkm_vmm_ctor()
1215 nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_pfn_split_merge() argument
1233 return nvkm_vmm_node_merge(vmm, prev, vma, next, size); in nvkm_vmm_pfn_split_merge()
1234 return nvkm_vmm_node_split(vmm, vma, addr, size); in nvkm_vmm_pfn_split_merge()
1238 nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size) in nvkm_vmm_pfn_unmap() argument
1240 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr); in nvkm_vmm_pfn_unmap()
1254 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd], in nvkm_vmm_pfn_unmap()
1257 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false); in nvkm_vmm_pfn_unmap()
1274 nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn) in nvkm_vmm_pfn_map() argument
1276 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_pfn_map()
1292 addr + size < addr || addr + size > vmm->limit) { in nvkm_vmm_pfn_map()
1293 VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n", in nvkm_vmm_pfn_map()
1298 if (!(vma = nvkm_vmm_node_search(vmm, addr))) in nvkm_vmm_pfn_map()
1338 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size, in nvkm_vmm_pfn_map()
1340 vmm->func->page, map); in nvkm_vmm_pfn_map()
1347 tmp->refd = page - vmm->func->page; in nvkm_vmm_pfn_map()
1360 ret = nvkm_vmm_ptes_get_map(vmm, page, addr, in nvkm_vmm_pfn_map()
1364 nvkm_vmm_ptes_map(vmm, page, addr, size, &args, in nvkm_vmm_pfn_map()
1369 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, in nvkm_vmm_pfn_map()
1397 nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap_region() argument
1402 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_unmap_region()
1410 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size); in nvkm_vmm_unmap_region()
1414 nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn) in nvkm_vmm_unmap_locked() argument
1416 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd]; in nvkm_vmm_unmap_locked()
1419 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1422 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn); in nvkm_vmm_unmap_locked()
1425 nvkm_vmm_unmap_region(vmm, vma); in nvkm_vmm_unmap_locked()
1429 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_unmap() argument
1432 mutex_lock(&vmm->mutex.vmm); in nvkm_vmm_unmap()
1433 nvkm_vmm_unmap_locked(vmm, vma, false); in nvkm_vmm_unmap()
1434 mutex_unlock(&vmm->mutex.vmm); in nvkm_vmm_unmap()
1439 nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_valid() argument
1445 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift); in nvkm_vmm_map_valid()
1452 VMM_DEBUG(vmm, "%d !HOST", map->page->shift); in nvkm_vmm_map_valid()
1465 VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d", in nvkm_vmm_map_valid()
1471 return vmm->func->valid(vmm, argv, argc, map); in nvkm_vmm_map_valid()
1475 nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_choose() argument
1478 for (map->page = vmm->func->page; map->page->shift; map->page++) { in nvkm_vmm_map_choose()
1479 VMM_DEBUG(vmm, "trying %d", map->page->shift); in nvkm_vmm_map_choose()
1480 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map)) in nvkm_vmm_map_choose()
1487 nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, in nvkm_vmm_map_locked() argument
1497 VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx", in nvkm_vmm_map_locked()
1507 const u32 debug = vmm->debug; in nvkm_vmm_map_locked()
1508 vmm->debug = 0; in nvkm_vmm_map_locked()
1509 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1510 vmm->debug = debug; in nvkm_vmm_map_locked()
1512 VMM_DEBUG(vmm, "invalid at any page size"); in nvkm_vmm_map_locked()
1513 nvkm_vmm_map_choose(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1519 map->page = &vmm->func->page[vma->refd]; in nvkm_vmm_map_locked()
1521 map->page = &vmm->func->page[vma->page]; in nvkm_vmm_map_locked()
1523 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map); in nvkm_vmm_map_locked()
1525 VMM_DEBUG(vmm, "invalid %d\n", ret); in nvkm_vmm_map_locked()
1557 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1561 vma->refd = map->page - vmm->func->page; in nvkm_vmm_map_locked()
1563 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); in nvkm_vmm_map_locked()
1566 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags); in nvkm_vmm_map_locked()
1575 nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc, in nvkm_vmm_map() argument
1580 if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) && in nvkm_vmm_map()
1581 vmm->managed.raw) in nvkm_vmm_map()
1582 return nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1584 mutex_lock(&vmm->mutex.vmm); in nvkm_vmm_map()
1585 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); in nvkm_vmm_map()
1587 mutex_unlock(&vmm->mutex.vmm); in nvkm_vmm_map()
1592 nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_region() argument
1599 nvkm_vmm_free_delete(vmm, prev); in nvkm_vmm_put_region()
1604 nvkm_vmm_free_delete(vmm, next); in nvkm_vmm_put_region()
1607 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_put_region()
1611 nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma) in nvkm_vmm_put_locked() argument
1613 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_put_locked()
1638 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr, in nvkm_vmm_put_locked()
1644 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size); in nvkm_vmm_put_locked()
1656 nvkm_vmm_unmap_region(vmm, next); in nvkm_vmm_put_locked()
1668 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size); in nvkm_vmm_put_locked()
1679 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false); in nvkm_vmm_put_locked()
1683 nvkm_vmm_node_remove(vmm, vma); in nvkm_vmm_put_locked()
1689 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_put_locked()
1693 nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma) in nvkm_vmm_put() argument
1697 mutex_lock(&vmm->mutex.vmm); in nvkm_vmm_put()
1698 nvkm_vmm_put_locked(vmm, vma); in nvkm_vmm_put()
1699 mutex_unlock(&vmm->mutex.vmm); in nvkm_vmm_put()
1705 nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse, in nvkm_vmm_get_locked() argument
1708 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE]; in nvkm_vmm_get_locked()
1714 VMM_TRACE(vmm, "getref %d mapref %d sparse %d " in nvkm_vmm_get_locked()
1720 VMM_DEBUG(vmm, "args %016llx %d %d %d", in nvkm_vmm_get_locked()
1731 if (unlikely((getref || vmm->func->page_block) && !shift)) { in nvkm_vmm_get_locked()
1732 VMM_DEBUG(vmm, "page size required: %d %016llx", in nvkm_vmm_get_locked()
1733 getref, vmm->func->page_block); in nvkm_vmm_get_locked()
1741 for (page = vmm->func->page; page->shift; page++) { in nvkm_vmm_get_locked()
1747 VMM_DEBUG(vmm, "page %d %016llx", shift, size); in nvkm_vmm_get_locked()
1756 temp = vmm->free.rb_node; in nvkm_vmm_get_locked()
1777 const int p = page - vmm->func->page; in nvkm_vmm_get_locked()
1780 if (vmm->func->page_block && prev && prev->page != p) in nvkm_vmm_get_locked()
1781 addr = ALIGN(addr, vmm->func->page_block); in nvkm_vmm_get_locked()
1785 if (vmm->func->page_block && next && next->page != p) in nvkm_vmm_get_locked()
1786 tail = ALIGN_DOWN(tail, vmm->func->page_block); in nvkm_vmm_get_locked()
1789 nvkm_vmm_free_remove(vmm, this); in nvkm_vmm_get_locked()
1803 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1806 nvkm_vmm_free_insert(vmm, vma); in nvkm_vmm_get_locked()
1812 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1815 nvkm_vmm_free_insert(vmm, tmp); in nvkm_vmm_get_locked()
1820 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1822 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true); in nvkm_vmm_get_locked()
1824 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size); in nvkm_vmm_get_locked()
1828 nvkm_vmm_put_region(vmm, vma); in nvkm_vmm_get_locked()
1834 vma->page = page - vmm->func->page; in nvkm_vmm_get_locked()
1837 nvkm_vmm_node_insert(vmm, vma); in nvkm_vmm_get_locked()
1843 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma) in nvkm_vmm_get() argument
1846 mutex_lock(&vmm->mutex.vmm); in nvkm_vmm_get()
1847 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma); in nvkm_vmm_get()
1848 mutex_unlock(&vmm->mutex.vmm); in nvkm_vmm_get()
1853 nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size, in nvkm_vmm_raw_unmap() argument
1856 const struct nvkm_vmm_page *page = &vmm->func->page[refd]; in nvkm_vmm_raw_unmap()
1858 nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false); in nvkm_vmm_raw_unmap()
1862 nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd) in nvkm_vmm_raw_put() argument
1864 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_raw_put()
1866 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size); in nvkm_vmm_raw_put()
1870 nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd) in nvkm_vmm_raw_get() argument
1872 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_raw_get()
1877 return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size); in nvkm_vmm_raw_get()
1881 nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref) in nvkm_vmm_raw_sparse() argument
1885 mutex_lock(&vmm->mutex.ref); in nvkm_vmm_raw_sparse()
1886 ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref); in nvkm_vmm_raw_sparse()
1887 mutex_unlock(&vmm->mutex.ref); in nvkm_vmm_raw_sparse()
1893 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst) in nvkm_vmm_part() argument
1895 if (inst && vmm && vmm->func->part) { in nvkm_vmm_part()
1896 mutex_lock(&vmm->mutex.vmm); in nvkm_vmm_part()
1897 vmm->func->part(vmm, inst); in nvkm_vmm_part()
1898 mutex_unlock(&vmm->mutex.vmm); in nvkm_vmm_part()
1903 nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst) in nvkm_vmm_join() argument
1906 if (vmm->func->join) { in nvkm_vmm_join()
1907 mutex_lock(&vmm->mutex.vmm); in nvkm_vmm_join()
1908 ret = vmm->func->join(vmm, inst); in nvkm_vmm_join()
1909 mutex_unlock(&vmm->mutex.vmm); in nvkm_vmm_join()
1919 nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm); in nvkm_vmm_boot_ptes()
1924 nvkm_vmm_boot(struct nvkm_vmm *vmm) in nvkm_vmm_boot() argument
1926 const struct nvkm_vmm_page *page = vmm->func->page; in nvkm_vmm_boot()
1927 const u64 limit = vmm->limit - vmm->start; in nvkm_vmm_boot()
1933 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit); in nvkm_vmm_boot()
1937 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false, in nvkm_vmm_boot()
1939 vmm->bootstrapped = true; in nvkm_vmm_boot()
1946 struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref); in nvkm_vmm_del() local
1947 nvkm_vmm_dtor(vmm); in nvkm_vmm_del()
1948 kfree(vmm); in nvkm_vmm_del()
1954 struct nvkm_vmm *vmm = *pvmm; in nvkm_vmm_unref() local
1955 if (vmm) { in nvkm_vmm_unref()
1956 kref_put(&vmm->kref, nvkm_vmm_del); in nvkm_vmm_unref()
1962 nvkm_vmm_ref(struct nvkm_vmm *vmm) in nvkm_vmm_ref() argument
1964 if (vmm) in nvkm_vmm_ref()
1965 kref_get(&vmm->kref); in nvkm_vmm_ref()
1966 return vmm; in nvkm_vmm_ref()
1975 struct nvkm_vmm *vmm = NULL; in nvkm_vmm_new() local
1977 ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc, in nvkm_vmm_new()
1978 key, name, &vmm); in nvkm_vmm_new()
1980 nvkm_vmm_unref(&vmm); in nvkm_vmm_new()
1981 *pvmm = vmm; in nvkm_vmm_new()
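
The listing above spans the whole life cycle of a VA allocation in this VMM: nvkm_vmm_get() reserves a range from the free tree, nvkm_vmm_map() backs it with memory, and nvkm_vmm_unmap()/nvkm_vmm_put() tear it down again, with vmm->mutex.vmm taken internally by these unlocked entry points. Below is a minimal caller-side sketch assembled only from the signatures visible above; the helper name, the 4KiB page shift and the nvkm_vmm_map initialisation are illustrative assumptions, not code taken from the driver.

/*
 * Caller-side sketch of the nvkm_vmm_get()/map()/unmap()/put() sequence.
 * Hypothetical example only: the helper name, the page shift and the
 * nvkm_vmm_map initialisation are assumptions, not driver code.
 */
#include "vmm.h"	/* struct nvkm_vmm, struct nvkm_vma, struct nvkm_vmm_map */

static int
example_vma_map(struct nvkm_vmm *vmm, struct nvkm_memory *memory,
		void *argv, u32 argc, u64 size)
{
	struct nvkm_vmm_map map = { .memory = memory, .offset = 0 };
	struct nvkm_vma *vma;
	int ret;

	/* Reserve a range of virtual address space; 12 asks for 4KiB pages. */
	ret = nvkm_vmm_get(vmm, 12, size, &vma);
	if (ret)
		return ret;

	/* Back the range with memory; argv/argc carry the class-specific
	 * mapping arguments that vmm->func->valid() interprets. */
	ret = nvkm_vmm_map(vmm, vma, argv, argc, &map);
	if (ret) {
		nvkm_vmm_put(vmm, &vma);
		return ret;
	}

	/* ... use the mapping ... */

	/* nvkm_vmm_put() would also unmap a still-mapped range, so the
	 * explicit unmap here is optional; put returns the VA range to
	 * the vmm->free tree and clears the caller's pointer. */
	nvkm_vmm_unmap(vmm, vma);
	nvkm_vmm_put(vmm, &vma);
	return 0;
}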