xref: /linux/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c (revision 80626ae6ffe57917915c6e6d8ea1e908689954fd)
/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <subdev/mmu/vmm.h>

#include <nvhw/drf.h>
#include "nvrm/vmm.h"

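/*
 * Tear down the GSP-RM side of a VMM: if our page directory was handed to
 * RM (an "external" VA space), ask RM to unset it first, then free the RM
 * VA space object, device and client, and release the VA region that was
 * reserved for RM-managed mappings.
 */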
void
r535_mmu_vaspace_del(struct nvkm_vmm *vmm)
{
	if (vmm->rm.external) {
		NV0080_CTRL_DMA_UNSET_PAGE_DIRECTORY_PARAMS *ctrl;

		ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
					    NV0080_CTRL_CMD_DMA_UNSET_PAGE_DIRECTORY,
					    sizeof(*ctrl));
		if (!IS_ERR(ctrl)) {
			ctrl->hVASpace = vmm->rm.object.handle;

			WARN_ON(nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl));
		}

		vmm->rm.external = false;
	}

	nvkm_gsp_rm_free(&vmm->rm.object);
	nvkm_gsp_device_dtor(&vmm->rm.device);
	nvkm_gsp_client_dtor(&vmm->rm.client);

	nvkm_vmm_put(vmm, &vmm->rm.rsvd);
}

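/*
 * Create the RM-side view of a VMM.  A GSP-RM client/device pair is created
 * for the VMM and a FERMI_VASPACE_A object is allocated against it.  For an
 * externally-owned ("external") VA space, RM is simply pointed at our
 * existing top-level page directory; otherwise a VA region is reserved for
 * RM and the server-reserved page-directory levels are copied across.
 */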
int
r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external)
{
	NV_VASPACE_ALLOCATION_PARAMETERS *args;
	int ret;

	ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
					  &vmm->rm.client, &vmm->rm.device);
	if (ret)
		return ret;

	args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, handle, FERMI_VASPACE_A,
				     sizeof(*args), &vmm->rm.object);
	if (IS_ERR(args))
		return PTR_ERR(args);

	args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
	if (external)
		args->flags = NV_VASPACE_ALLOCATION_FLAGS_IS_EXTERNALLY_OWNED;

	ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
	if (ret)
		return ret;

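	/*
	 * Internal (non-external) VA space: reserve a 512MiB page-size VA
	 * region for GSP-RM's own mappings and hand RM the page-directory
	 * levels that cover it.
	 */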
	if (!external) {
		NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
		u8 page_shift = 29; /* 512MiB */
		const u64 page_size = BIT_ULL(page_shift);
		const struct nvkm_vmm_page *page;
		const struct nvkm_vmm_desc *desc;
		struct nvkm_vmm_pt *pd = vmm->pd;

		for (page = vmm->func->page; page->shift; page++) {
			if (page->shift == page_shift)
				break;
		}

		if (WARN_ON(!page->shift))
			return -EINVAL;

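		/*
		 * Reserve the VA region GSP-RM will manage; it must land at
		 * the address RM expects (checked below).
		 */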
		mutex_lock(&vmm->mutex.vmm);
		ret = nvkm_vmm_get_locked(vmm, true, false, false, page_shift, 32, page_size,
					  &vmm->rm.rsvd);
		mutex_unlock(&vmm->mutex.vmm);
		if (ret)
			return ret;

		/* Some parts of RM expect the server-reserved area to be in a specific location. */
		if (WARN_ON(vmm->rm.rsvd->addr != SPLIT_VAS_SERVER_RM_MANAGED_VA_START ||
			    vmm->rm.rsvd->size != SPLIT_VAS_SERVER_RM_MANAGED_VA_SIZE))
			return -EINVAL;

		ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
					    NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
					    sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		ctrl->pageSize = page_size;
		ctrl->virtAddrLo = vmm->rm.rsvd->addr;
		ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;

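		/*
		 * Walk the page-table levels above the 512MiB page size:
		 * count how many levels RM needs copies of and accumulate
		 * their index bits on top of page_shift.
		 */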
		for (desc = page->desc; desc->bits; desc++) {
			ctrl->numLevelsToCopy++;
			page_shift += desc->bits;
		}
		desc--;

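		/*
		 * Describe each level for RM, starting from the root page
		 * directory and walking down through the first PDE at each
		 * level, undoing the shift accumulation as we go.
		 */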
		for (int i = 0; i < ctrl->numLevelsToCopy; i++, desc--) {
			page_shift -= desc->bits;

			ctrl->levels[i].physAddress = pd->pt[0]->addr;
			ctrl->levels[i].size = BIT_ULL(desc->bits) * desc->size;
			ctrl->levels[i].aperture = 1;
			ctrl->levels[i].pageShift = page_shift;

			pd = pd->pde[0];
		}

		ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
	} else {
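		/*
		 * Externally-owned VA space: point RM at our existing
		 * top-level page directory (VIDMEM aperture) rather than
		 * copying anything.
		 */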
		NV0080_CTRL_DMA_SET_PAGE_DIRECTORY_PARAMS *ctrl;

		ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.object,
					    NV0080_CTRL_CMD_DMA_SET_PAGE_DIRECTORY,
					    sizeof(*ctrl));
		if (IS_ERR(ctrl))
			return PTR_ERR(ctrl);

		ctrl->physAddress = vmm->pd->pt[0]->addr;
		ctrl->numEntries = 1 << vmm->func->page[0].desc->bits;
		ctrl->flags = NVDEF(NV0080_CTRL_DMA_SET_PAGE_DIRECTORY, FLAGS, APERTURE, VIDMEM);
		ctrl->hVASpace = vmm->rm.object.handle;

		ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.object, ctrl);
		if (ret == 0)
			vmm->rm.external = true;
	}

	return ret;
}

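/*
 * The .promote_vmm hook: give an existing VMM an externally-owned RM VA
 * space, using the common NVKM_RM_VASPACE handle.
 */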
static int
r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
{
	return r535_mmu_vaspace_new(vmm, NVKM_RM_VASPACE, true);
}

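/* Free the nvkm_mmu_func that r535_mmu_new() allocated. */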
static void
r535_mmu_dtor(struct nvkm_mmu *mmu)
{
	kfree(mmu->func);
}

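/*
 * Construct the MMU subdev for GSP-RM: clone the relevant hardware MMU
 * function pointers into a heap-allocated nvkm_mmu_func, add the RM-specific
 * dtor and promote_vmm hooks, and register it with the common MMU code.
 */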
int
r535_mmu_new(const struct nvkm_mmu_func *hw,
	     struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	     struct nvkm_mmu **pmmu)
{
	struct nvkm_mmu_func *rm;
	int ret;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_mmu_dtor;
	rm->dma_bits = hw->dma_bits;
	rm->mmu = hw->mmu;
	rm->mem = hw->mem;
	rm->vmm = hw->vmm;
	rm->kind = hw->kind;
	rm->kind_sys = hw->kind_sys;
	rm->promote_vmm = r535_mmu_promote_vmm;

	ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
	if (ret)
		kfree(rm);

	return ret;
}