// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo.h>

#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_pm.h"

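/*
 * Fill DPT entries for one plane of a rotated view: walk the source bo
 * column by column, starting from the bottom row, so that consecutive
 * DPT PTEs describe the 90° rotated image.
 */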
static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
		  u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	u32 column, row;

	/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
	 * by writing dpt/ggtt in a different order?
	 */

	for (column = 0; column < width; column++) {
		u32 src_idx = src_stride * (height - 1) + column + bo_ofs;

		for (row = 0; row < height; row++) {
			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
							      xe->pat.idx[XE_CACHE_NONE]);

			iosys_map_wr(map, *dpt_ofs, u64, pte);
			*dpt_ofs += 8;
			src_idx -= src_stride;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*dpt_ofs += (dst_stride - height) * 8;
	}

	/* Align to next page */
	*dpt_ofs = ALIGN(*dpt_ofs, 4096);
}

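/*
 * Fill DPT entries for one plane of a remapped view: walk the source bo
 * row by row, skipping the dst_stride - width padding entries at the end
 * of each row, since the DE ignores PTEs for padding tiles.
 */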
static void
write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
		   u32 bo_ofs, u32 width, u32 height, u32 src_stride,
		   u32 dst_stride)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
		= ggtt->pt_ops->pte_encode_bo;
	u32 column, row;

	for (row = 0; row < height; row++) {
		u32 src_idx = src_stride * row + bo_ofs;

		for (column = 0; column < width; column++) {
			iosys_map_wr(map, *dpt_ofs, u64,
				     pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
						   xe->pat.idx[XE_CACHE_NONE]));

			*dpt_ofs += 8;
			src_idx++;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*dpt_ofs += (dst_stride - width) * 8;
	}

	/* Align to next page */
	*dpt_ofs = ALIGN(*dpt_ofs, 4096);
}

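/*
 * Pin a framebuffer by building a DPT (display page table): allocate a
 * page-table bo, write one PTE per 4K page of the view into it, and let
 * the vma's node point at the DPT's own GGTT node.
 */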
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
			       const struct i915_gtt_view *view,
			       struct i915_vma *vma,
			       unsigned int alignment)
{
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
	struct xe_ggtt *ggtt = tile0->mem.ggtt;
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
	u32 dpt_size, size = bo->ttm.base.size;

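	/*
	 * One u64 PTE is written per 4K page of the view. For example, a
	 * (hypothetical) 16 MiB linear fb covers 4096 pages and thus needs
	 * 4096 * 8 = 32 KiB of DPT space, page-aligned by ALIGN() below.
	 */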
	if (view->type == I915_GTT_VIEW_NORMAL)
		dpt_size = ALIGN(size / XE_PAGE_SIZE * 8, XE_PAGE_SIZE);
	else if (view->type == I915_GTT_VIEW_REMAPPED)
		dpt_size = ALIGN(intel_remapped_info_size(&fb->remapped_view.gtt.remapped) * 8,
				 XE_PAGE_SIZE);
	else
		/* display uses 4K tiles instead of bytes here, convert to entries... */
		dpt_size = ALIGN(intel_rotation_info_size(&view->rotated) * 8,
				 XE_PAGE_SIZE);

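	/*
	 * Keep the DPT in VRAM on discrete parts and in stolen memory on
	 * integrated ones; fall back to system memory if that fails.
	 */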
	if (IS_DGFX(xe))
		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
						      dpt_size, ~0ull,
						      ttm_bo_type_kernel,
						      XE_BO_FLAG_VRAM0 |
						      XE_BO_FLAG_GGTT |
						      XE_BO_FLAG_PAGETABLE,
						      alignment);
	else
		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
						      dpt_size, ~0ull,
						      ttm_bo_type_kernel,
						      XE_BO_FLAG_STOLEN |
						      XE_BO_FLAG_GGTT |
						      XE_BO_FLAG_PAGETABLE,
						      alignment);
	if (IS_ERR(dpt))
		dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
						      dpt_size, ~0ull,
						      ttm_bo_type_kernel,
						      XE_BO_FLAG_SYSTEM |
						      XE_BO_FLAG_GGTT |
						      XE_BO_FLAG_PAGETABLE,
						      alignment);
	if (IS_ERR(dpt))
		return PTR_ERR(dpt);

	if (view->type == I915_GTT_VIEW_NORMAL) {
		u32 x;

		for (x = 0; x < size / XE_PAGE_SIZE; x++) {
			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
							      xe->pat.idx[XE_CACHE_NONE]);

			iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
		}
	} else if (view->type == I915_GTT_VIEW_REMAPPED) {
		const struct intel_remapped_info *remap_info = &view->remapped;
		u32 i, dpt_ofs = 0;

		for (i = 0; i < ARRAY_SIZE(remap_info->plane); i++)
			write_dpt_remapped(bo, &dpt->vmap, &dpt_ofs,
					   remap_info->plane[i].offset,
					   remap_info->plane[i].width,
					   remap_info->plane[i].height,
					   remap_info->plane[i].src_stride,
					   remap_info->plane[i].dst_stride);
	} else {
		const struct intel_rotation_info *rot_info = &view->rotated;
		u32 i, dpt_ofs = 0;

		for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
			write_dpt_rotated(bo, &dpt->vmap, &dpt_ofs,
					  rot_info->plane[i].offset,
					  rot_info->plane[i].width,
					  rot_info->plane[i].height,
					  rot_info->plane[i].src_stride,
					  rot_info->plane[i].dst_stride);
	}

	vma->dpt = dpt;
	vma->node = dpt->ggtt_node[tile0->id];
	return 0;
}

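/*
 * Like write_dpt_rotated(), but the rotated PTEs are written straight
 * into the GGTT rather than into a DPT bo.
 */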
static void
write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo_ofs,
		   u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
	struct xe_device *xe = xe_bo_device(bo);
	u32 column, row;

	for (column = 0; column < width; column++) {
		u32 src_idx = src_stride * (height - 1) + column + bo_ofs;

		for (row = 0; row < height; row++) {
			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
							      xe->pat.idx[XE_CACHE_NONE]);

			ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
			*ggtt_ofs += XE_PAGE_SIZE;
			src_idx -= src_stride;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*ggtt_ofs += (dst_stride - height) * XE_PAGE_SIZE;
	}
}

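/*
 * Pin a framebuffer directly into the GGTT, without a DPT. A normal view
 * reuses the bo's own GGTT node when one exists; otherwise a new node is
 * inserted and filled with linear or rotated PTEs.
 */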
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
				const struct i915_gtt_view *view,
				struct i915_vma *vma,
				unsigned int alignment)
{
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	u32 align;
	int ret;

	/* TODO: Consider sharing framebuffer mapping?
	 * embed i915_vma inside intel_framebuffer
	 */
	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
	ret = mutex_lock_interruptible(&ggtt->lock);
	if (ret)
		goto out;

	align = XE_PAGE_SIZE;
	if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
		align = max_t(u32, align, SZ_64K);

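	/*
	 * Three cases: reuse the bo's existing GGTT mapping for a normal
	 * view, insert a fresh linear mapping, or build a rotated mapping
	 * from the per-plane rotation info.
	 */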
	if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
		vma->node = bo->ggtt_node[ggtt->tile->id];
	} else if (view->type == I915_GTT_VIEW_NORMAL) {
		u32 x, size = bo->ttm.base.size;

		vma->node = xe_ggtt_node_init(ggtt);
		if (IS_ERR(vma->node)) {
			ret = PTR_ERR(vma->node);
			goto out_unlock;
		}

		ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
		if (ret) {
			xe_ggtt_node_fini(vma->node);
			goto out_unlock;
		}

		for (x = 0; x < size; x += XE_PAGE_SIZE) {
			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
							      xe->pat.idx[XE_CACHE_NONE]);

			ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
		}
	} else {
		u32 i, ggtt_ofs;
		const struct intel_rotation_info *rot_info = &view->rotated;

		/* display seems to use tiles instead of bytes here, so convert it back... */
		u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE;

		vma->node = xe_ggtt_node_init(ggtt);
		if (IS_ERR(vma->node)) {
			ret = PTR_ERR(vma->node);
			goto out_unlock;
		}

		ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
		if (ret) {
			xe_ggtt_node_fini(vma->node);
			goto out_unlock;
		}

		ggtt_ofs = vma->node->base.start;

		for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
			write_ggtt_rotated(bo, ggtt, &ggtt_ofs,
					   rot_info->plane[i].offset,
					   rot_info->plane[i].width,
					   rot_info->plane[i].height,
					   rot_info->plane[i].src_stride,
					   rot_info->plane[i].dst_stride);
	}

out_unlock:
	mutex_unlock(&ggtt->lock);
out:
	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
	return ret;
}

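/*
 * Allocate and pin a vma for @fb: migrate/validate and pin the bo, then
 * map it either through a DPT or straight into the GGTT, depending on
 * intel_fb_uses_dpt(). Undone by __xe_unpin_fb_vma(). A minimal usage
 * sketch, mirroring intel_plane_pin_fb() below:
 *
 *	vma = __xe_pin_fb_vma(intel_fb, &state->view.gtt, alignment);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */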
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
					const struct i915_gtt_view *view,
					unsigned int alignment)
{
	struct drm_device *dev = fb->base.dev;
	struct xe_device *xe = to_xe_device(dev);
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	int ret;

	if (!vma)
		return ERR_PTR(-ENOMEM);

	refcount_set(&vma->ref, 1);
	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
		struct xe_tile *tile = xe_device_get_root_tile(xe);

		/*
		 * If we need to be able to access the clear-color value stored
		 * in the buffer, then we require that such buffers are also
		 * CPU accessible. This is important on small-bar systems where
		 * only some subset of VRAM is CPU accessible.
		 */
		if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
			ret = -EINVAL;
			goto err;
		}
	}

	/*
	 * Pin the framebuffer; we can't use the xe_bo_(un)pin functions here,
	 * as their assumptions don't hold for framebuffers.
	 */
	ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);
	if (ret)
		goto err;

	if (IS_DGFX(xe))
		ret = xe_bo_migrate(bo, XE_PL_VRAM0);
	else
		ret = xe_bo_validate(bo, NULL, true);
	if (!ret)
		ttm_bo_pin(&bo->ttm);
	ttm_bo_unreserve(&bo->ttm);
	if (ret)
		goto err;

	vma->bo = bo;
	if (intel_fb_uses_dpt(&fb->base))
		ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
	else
		ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
	if (ret)
		goto err_unpin;

	/* Ensure DPT writes are flushed */
	xe_device_l2_flush(xe);
	return vma;

err_unpin:
	ttm_bo_reserve(&bo->ttm, false, false, NULL);
	ttm_bo_unpin(&bo->ttm);
	ttm_bo_unreserve(&bo->ttm);
err:
	kfree(vma);
	return ERR_PTR(ret);
}

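/*
 * Drop one vma reference; on the final put, release the DPT bo or the
 * vma's private GGTT node (a mapping borrowed from the bo itself is left
 * in place), then unpin the backing bo.
 */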
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
	u8 tile_id = vma->node->ggtt->tile->id;

	if (!refcount_dec_and_test(&vma->ref))
		return;

	if (vma->dpt)
		xe_bo_unpin_map_no_vm(vma->dpt);
	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
		 vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
		xe_ggtt_node_remove(vma->node, false);

	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
	ttm_bo_unpin(&vma->bo->ttm);
	ttm_bo_unreserve(&vma->bo->ttm);
	kfree(vma);
}

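/*
 * Compatibility wrapper around __xe_pin_fb_vma() for the shared display
 * code: on xe only @phys_alignment is honoured, fences are not used, and
 * *out_flags is always cleared.
 */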
struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
		     const struct i915_gtt_view *view,
		     unsigned int alignment,
		     unsigned int phys_alignment,
		     unsigned int vtd_guard,
		     bool uses_fence,
		     unsigned long *out_flags)
{
	*out_flags = 0;

	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment);
}

void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
{
	__xe_unpin_fb_vma(vma);
}

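/*
 * Try to reuse an already pinned vma: either the old plane state's vma
 * when the fb and view are unchanged, or the fbdev vma when flipping to
 * the fbdev framebuffer. Takes an extra vma reference on success.
 */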
static bool reuse_vma(struct intel_plane_state *new_plane_state,
		      const struct intel_plane_state *old_plane_state)
{
	struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct i915_vma *vma;

	if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
	    !memcmp(&old_plane_state->view.gtt,
		    &new_plane_state->view.gtt,
		    sizeof(new_plane_state->view.gtt))) {
		vma = old_plane_state->ggtt_vma;
		goto found;
	}

	if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
		vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
		if (vma)
			goto found;
	}

	return false;

found:
	refcount_inc(&vma->ref);
	new_plane_state->ggtt_vma = vma;
	return true;
}

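/*
 * Pin the new plane state's fb for scanout, reusing an existing vma when
 * possible.
 */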
int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
		       const struct intel_plane_state *old_plane_state)
{
	struct drm_framebuffer *fb = new_plane_state->hw.fb;
	struct drm_gem_object *obj = intel_fb_bo(fb);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct i915_vma *vma;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	unsigned int alignment = plane->min_alignment(plane, fb, 0);

	if (reuse_vma(new_plane_state, old_plane_state))
		return 0;

	/* We reject creating !SCANOUT fbs, so this is weird. */
	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));

	vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	new_plane_state->ggtt_vma = vma;
	return 0;
}

void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	__xe_unpin_fb_vma(old_plane_state->ggtt_vma);
	old_plane_state->ggtt_vma = NULL;
}

/*
 * For Xe, introduce a dummy intel_dpt_create() which just returns NULL,
 * an intel_dpt_destroy() which does nothing, and a fake intel_dpt_offset()
 * returning 0.
 */
struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
{
	return NULL;
}

void intel_dpt_destroy(struct i915_address_space *vm)
{
}

u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
	return 0;
}