// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
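
/*
 * Display Page Tables (DPT): on platforms using them, framebuffers are
 * mapped to the HW via a GGTT->DPT page table rather than directly via
 * the GGTT. Each framebuffer gets its own DPT: a single level of gen8
 * PTEs backed by a GEM object, which is itself pinned into the GGTT so
 * the HW can walk GGTT->DPT->framebuffer pages during scanout.
 */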

#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

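/*
 * A DPT embeds an i915_address_space covering the framebuffer's virtual
 * range: obj backs the page table itself, while vma and iomem hold the
 * GGTT binding and CPU mapping of obj once the DPT has been pinned via
 * intel_dpt_pin_to_ggtt().
 */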
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

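/*
 * Downcast from the embedded vm to its containing i915_dpt. The
 * BUILD_BUG_ON() below asserts that vm stays the first member, and the
 * drm_WARN_ON() catches callers passing in a non-DPT address space.
 */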
static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

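/* A gen8 PTE is 64 bits wide, so one writeq() updates the whole entry. */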
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

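/* Write the PTE covering a single page at the given DPT offset. */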
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    unsigned int pat_index,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, pat_index, flags));
}

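/*
 * Write one PTE per page of the vma, starting at the entry covering
 * vma_res->start. The PAT index and flags are encoded into a PTE template
 * once and then OR'ed with each page's DMA address.
 */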
static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       unsigned int pat_index,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

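/*
 * Intentionally empty: stale DPT PTEs are simply left in place on unbind
 * and overwritten by the next bind of the range.
 */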
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

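/* Bind a vma into the DPT by writing PTEs for all of its backing pages. */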
static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 unsigned int pat_index,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, pat_index, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

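/* Unbinding only defers to clear_range(), which is a nop for the DPT. */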
static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

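/* Called on the final i915_vm_put(): release the page table backing object. */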
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

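/**
 * intel_dpt_pin_to_ggtt - pin the DPT page table into the GGTT
 * @vm: DPT address space
 * @alignment: minimum GGTT alignment of the mapping
 *
 * Pin the GEM object backing the page table into the GGTT, in the mappable
 * range if the object lives in stolen memory, and map it for CPU access so
 * that the PTEs can be written.
 *
 * Returns: the vma of the pinned page table on success, an ERR_PTR()
 * otherwise.
 */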
struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
				       unsigned int alignment)
{
	struct drm_i915_private *i915 = vm->i915;
	struct intel_display *display = i915->display;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	struct ref_tracker *wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	u64 pin_flags = 0;
	int err;

	if (i915_gem_object_is_stolen(dpt->obj))
		pin_flags |= PIN_MAPPABLE;

	wakeref = intel_display_rpm_get(display);
	atomic_inc(&display->restore.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0,
						  alignment, pin_flags);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	dpt->obj->mm.dirty = true;

	atomic_dec(&display->restore.pending_fb_pin);
	intel_display_rpm_put(display, wakeref);

	return err ? ERR_PTR(err) : vma;
}

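/**
 * intel_dpt_unpin_from_ggtt - unpin the DPT page table from the GGTT
 * @vm: DPT address space
 *
 * Release the CPU mapping and the vma reference taken by
 * intel_dpt_pin_to_ggtt().
 */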
void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @display: display device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The contents of these page
 * tables are not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct intel_display *display)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(display))
		return;

	mutex_lock(&display->drm->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, display->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm, true);
	}
	mutex_unlock(&display->drm->mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @display: display device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended
 * by calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct intel_display *display)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(display))
		return;

	mutex_lock(&display->drm->mode_config.fb_lock);

	drm_for_each_fb(drm_fb, display->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm, true);
	}

	mutex_unlock(&display->drm->mode_config.fb_lock);
}

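/**
 * intel_dpt_create - create the DPT address space for a framebuffer
 * @fb: framebuffer the DPT will map
 *
 * Allocate the page table backing object, sized for either the remapped
 * view (if a POT stride remap is needed) or the framebuffer object itself,
 * trying lmem first, then stolen memory, then smem on platforms without
 * lmem, and initialize a DPT address space on top of it.
 *
 * Returns: the new DPT address space on success, an ERR_PTR() on failure.
 */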
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
		dpt_obj = i915_gem_object_create_shmem(i915, size);
	}
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma    = dpt_bind_vma;
	vm->vma_ops.unbind_vma  = dpt_unbind_vma;

	vm->pte_encode = vm->gt->ggtt->vm.pte_encode;

	dpt->obj = dpt_obj;
	dpt->obj->is_dpt = true;

	return &dpt->vm;
}

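/**
 * intel_dpt_destroy - destroy the DPT address space of a framebuffer
 * @vm: DPT address space to destroy
 *
 * Put the address space reference; once the last reference is gone the
 * backing object is released via the dpt_cleanup() hook.
 */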
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	dpt->obj->is_dpt = false;
	i915_vm_put(&dpt->vm);
}

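/**
 * intel_dpt_offset - query the GGTT offset of a pinned DPT page table
 * @dpt_vma: vma of the DPT, as returned by intel_dpt_pin_to_ggtt()
 *
 * Returns: the GGTT start address of the page table.
 */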
u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
	return dpt_vma->node.start;
}