xref: /linux/drivers/gpu/drm/xe/xe_pt.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <linux/dma-fence-array.h>
7 
8 #include "xe_pt.h"
9 
10 #include "regs/xe_gtt_defs.h"
11 #include "xe_bo.h"
12 #include "xe_device.h"
13 #include "xe_drm_client.h"
14 #include "xe_exec_queue.h"
15 #include "xe_gt.h"
16 #include "xe_gt_tlb_invalidation.h"
17 #include "xe_migrate.h"
18 #include "xe_pt_types.h"
19 #include "xe_pt_walk.h"
20 #include "xe_res_cursor.h"
21 #include "xe_sched_job.h"
22 #include "xe_sync.h"
23 #include "xe_svm.h"
24 #include "xe_trace.h"
25 #include "xe_ttm_stolen_mgr.h"
26 #include "xe_vm.h"
27 
28 struct xe_pt_dir {
29 	struct xe_pt pt;
30 	/** @children: Array of page-table child nodes */
31 	struct xe_ptw *children[XE_PDES];
32 	/** @staging: Array of page-table staging nodes */
33 	struct xe_ptw *staging[XE_PDES];
34 };
35 
36 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
37 #define xe_pt_set_addr(__xe_pt, __addr) ((__xe_pt)->addr = (__addr))
38 #define xe_pt_addr(__xe_pt) ((__xe_pt)->addr)
39 #else
40 #define xe_pt_set_addr(__xe_pt, __addr)
41 #define xe_pt_addr(__xe_pt) 0ull
42 #endif
43 
44 static const u64 xe_normal_pt_shifts[] = {12, 21, 30, 39, 48};
45 static const u64 xe_compact_pt_shifts[] = {16, 21, 30, 39, 48};
46 
47 #define XE_PT_HIGHEST_LEVEL (ARRAY_SIZE(xe_normal_pt_shifts) - 1)
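
/*
 * Illustrative sketch (not part of the driver): with the normal shifts
 * {12, 21, 30, 39, 48}, each level of the page-table tree is indexed by
 * 9 bits of the GPU virtual address, above a 12-bit page offset. A
 * hypothetical helper extracting the index for a given level could look
 * like:
 *
 *	static unsigned int example_pt_index(u64 addr, unsigned int level)
 *	{
 *		return (addr >> xe_normal_pt_shifts[level]) & (XE_PDES - 1);
 *	}
 *
 * The real walk code computes this via xe_pt_offset() in xe_pt_walk.h;
 * example_pt_index() is for illustration only and assumes the normal
 * (non-compact) shifts.
 */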
48 
49 static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
50 {
51 	return container_of(pt, struct xe_pt_dir, pt);
52 }
53 
54 static struct xe_pt *
55 xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
56 {
57 	return container_of(pt_dir->staging[index], struct xe_pt, base);
58 }
59 
60 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
61 			     unsigned int level)
62 {
63 	struct xe_device *xe = tile_to_xe(tile);
64 	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
65 	u8 id = tile->id;
66 
67 	if (!xe_vm_has_scratch(vm))
68 		return 0;
69 
70 	if (level > MAX_HUGEPTE_LEVEL)
71 		return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
72 						 0, pat_index);
73 
74 	return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
75 		XE_PTE_NULL;
76 }
77 
78 static void xe_pt_free(struct xe_pt *pt)
79 {
80 	if (pt->level)
81 		kfree(as_xe_pt_dir(pt));
82 	else
83 		kfree(pt);
84 }
85 
86 /**
87  * xe_pt_create() - Create a page-table.
88  * @vm: The vm to create for.
89  * @tile: The tile to create for.
90  * @level: The page-table level.
91  *
92  * Allocate and initialize a single struct xe_pt metadata structure. Also
93  * create the corresponding page-table bo, but don't initialize it. If the
94  * level is grater than zero, then it's assumed to be a directory page-
95  * table and the directory structure is also allocated and initialized to
96  * NULL pointers.
97  *
98  * Return: A valid struct xe_pt pointer on success, a pointer error code on
99  * error.
100  */
101 struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
102 			   unsigned int level)
103 {
104 	struct xe_pt *pt;
105 	struct xe_bo *bo;
106 	u32 bo_flags;
107 	int err;
108 
109 	if (level) {
110 		struct xe_pt_dir *dir = kzalloc(sizeof(*dir), GFP_KERNEL);
111 
112 		pt = (dir) ? &dir->pt : NULL;
113 	} else {
114 		pt = kzalloc(sizeof(*pt), GFP_KERNEL);
115 	}
116 	if (!pt)
117 		return ERR_PTR(-ENOMEM);
118 
119 	bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
120 		   XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
121 		   XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
122 	if (vm->xef) /* userspace */
123 		bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
124 
125 	pt->level = level;
126 	bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
127 				  ttm_bo_type_kernel,
128 				  bo_flags);
129 	if (IS_ERR(bo)) {
130 		err = PTR_ERR(bo);
131 		goto err_kfree;
132 	}
133 	pt->bo = bo;
134 	pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
135 	pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
136 
137 	if (vm->xef)
138 		xe_drm_client_add_bo(vm->xef->client, pt->bo);
139 	xe_tile_assert(tile, level <= XE_VM_MAX_LEVEL);
140 
141 	return pt;
142 
143 err_kfree:
144 	xe_pt_free(pt);
145 	return ERR_PTR(err);
146 }
147 ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
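
/*
 * Usage sketch (hedged; it mirrors the ERR_PTR convention of this
 * function rather than any specific call site):
 *
 *	struct xe_pt *pt = xe_pt_create(vm, tile, level);
 *
 *	if (IS_ERR(pt))
 *		return PTR_ERR(pt);
 *	xe_pt_populate_empty(tile, vm, pt);
 *
 * This matches how xe_pt_stage_bind_entry() below creates and then
 * populates child page-tables.
 */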
148 
149 /**
150  * xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
151  * entries.
152  * @tile: The tile whose scratch pagetable to use.
153  * @vm: The vm we populate for.
154  * @pt: The pagetable whose bo to initialize.
155  *
156  * Populate the page-table bo of @pt with entries pointing into the tile's
157  * scratch page-table tree if any. Otherwise populate with zeros.
158  */
159 void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
160 			  struct xe_pt *pt)
161 {
162 	struct iosys_map *map = &pt->bo->vmap;
163 	u64 empty;
164 	int i;
165 
166 	if (!xe_vm_has_scratch(vm)) {
167 		/*
168 		 * FIXME: Some memory is already allocated zeroed?
169 		 * Find out which memory that is and avoid this memset...
170 		 */
171 		xe_map_memset(vm->xe, map, 0, 0, SZ_4K);
172 	} else {
173 		empty = __xe_pt_empty_pte(tile, vm, pt->level);
174 		for (i = 0; i < XE_PDES; i++)
175 			xe_pt_write(vm->xe, map, i, empty);
176 	}
177 }
178 
179 /**
180  * xe_pt_shift() - Return the ilog2 value of the size of the address range of
181  * a page-table at a certain level.
182  * @level: The level.
183  *
184  * Return: The ilog2 value of the size of the address range of a page-table
185  * at level @level.
186  */
187 unsigned int xe_pt_shift(unsigned int level)
188 {
189 	return XE_PTE_SHIFT + XE_PDE_SHIFT * level;
190 }
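
/*
 * For reference, 1ull << xe_pt_shift(level) evaluates to:
 *
 *	level 0:   4K	(one leaf PTE's worth of address space)
 *	level 1:   2M	(a full level-0 page-table)
 *	level 2:   1G
 *	level 3: 512G
 *	level 4: 256T
 *
 * This matches how xe_vm_dbg_print_entries() below converts entry
 * offsets into addresses.
 */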
191 
192 /**
193  * xe_pt_destroy() - Destroy a page-table tree.
194  * @pt: The root of the page-table tree to destroy.
195  * @flags: vm flags. Currently unused.
196  * @deferred: List head of lockless list for deferred putting. NULL for
197  *            immediate putting.
198  *
199  * Puts the page-table bo, recursively calls xe_pt_destroy on all children
200  * and finally frees @pt. TODO: Can we remove the @flags argument?
201  */
202 void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
203 {
204 	int i;
205 
206 	if (!pt)
207 		return;
208 
209 	XE_WARN_ON(!list_empty(&pt->bo->ttm.base.gpuva.list));
210 	xe_bo_unpin(pt->bo);
211 	xe_bo_put_deferred(pt->bo, deferred);
212 
213 	if (pt->level > 0 && pt->num_live) {
214 		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
215 
216 		for (i = 0; i < XE_PDES; i++) {
217 			if (xe_pt_entry_staging(pt_dir, i))
218 				xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
219 					      deferred);
220 		}
221 	}
222 	xe_pt_free(pt);
223 }
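
/*
 * Deferred-destroy usage sketch (hedged; assumes the xe_bo_put_deferred()
 * / xe_bo_put_commit() pairing from xe_bo.h):
 *
 *	LLIST_HEAD(deferred);
 *
 *	xe_pt_destroy(pt, vm->flags, &deferred);
 *	xe_bo_put_commit(&deferred);	// drop the deferred bo references
 *
 * Passing NULL as @deferred instead puts each bo immediately, which the
 * deferred path exists to avoid in contexts where that is unsafe.
 */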
224 
225 /**
226  * xe_pt_clear() - Clear a page-table.
227  * @xe: xe device.
228  * @pt: The page-table.
229  *
230  * Clears page-table by setting to zero.
231  */
232 void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
233 {
234 	struct iosys_map *map = &pt->bo->vmap;
235 
236 	xe_map_memset(xe, map, 0, 0, SZ_4K);
237 }
238 
239 /**
240  * DOC: Pagetable building
241  *
242  * Below we use the term "page-table" for both page-directories, containing
243  * pointers to lower level page-directories or page-tables, and level 0
244  * page-tables that contain only page-table-entries pointing to memory pages.
245  *
246  * When inserting an address range in an already existing page-table tree
247  * there will typically be a set of page-tables that are shared with other
248  * address ranges, and a set that are private to this address range.
249  * The set of shared page-tables can be at most two per level,
250  * and those can't be updated immediately because the entries of those
251  * page-tables may still be in use by the gpu for other mappings. Therefore
252  * when inserting entries into those, we instead stage those insertions by
253  * adding insertion data into struct xe_vm_pgtable_update structures. This
254  * data, (subtrees for the cpu and page-table-entries for the gpu) is then
255  * added in a separate commit step. CPU-data is committed while still under the
256  * vm lock, the object lock and for userptr, the notifier lock in read mode.
257  * The GPU async data is committed either by the GPU or CPU after fulfilling
258  * relevant dependencies.
259  * For non-shared page-tables (and, in fact, for shared ones that aren't
260  * existing at the time of staging), we add the data in-place without the
261  * special update structures. This private part of the page-table tree will
262  * remain disconnected from the vm page-table tree until data is committed to
263  * the shared page tables of the vm tree in the commit phase.
264  */
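
/*
 * A sketch of the resulting bind flow (hedged pseudocode of the phases,
 * not a verbatim call chain; the helpers appear later in this file):
 *
 *	struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
 *	LLIST_HEAD(deferred);
 *	u32 num_entries;
 *
 *	// 1. Stage: build disconnected subtrees, record shared-pt updates
 *	err = xe_pt_stage_bind(tile, vma, NULL, entries, &num_entries, false);
 *
 *	// 2. Commit CPU-side: link staged subtrees into the staging tree
 *	xe_pt_commit_prepare_bind(vma, entries, num_entries, false);
 *
 *	// 3. After the GPU (or CPU) has written the staged PTEs:
 *	xe_pt_commit(vma, entries, num_entries, &deferred);
 */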
265 
266 struct xe_pt_update {
267 	/** @update: The update structure we're building for this parent. */
268 	struct xe_vm_pgtable_update *update;
269 	/** @parent: The parent. Used to detect a parent change. */
270 	struct xe_pt *parent;
271 	/** @preexisting: Whether the parent was pre-existing or allocated */
272 	bool preexisting;
273 };
274 
275 /**
276  * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
277  */
278 struct xe_pt_stage_bind_walk {
279 	/** @base: The base class. */
280 	struct xe_pt_walk base;
281 
282 	/* Input parameters for the walk */
283 	/** @vm: The vm we're building for. */
284 	struct xe_vm *vm;
285 	/** @tile: The tile we're building for. */
286 	struct xe_tile *tile;
287 	/** @default_vram_pte: PTE flag only template for VRAM. No address is associated */
288 	u64 default_vram_pte;
289 	/** @default_system_pte: PTE flag only template for System. No address is associated */
290 	u64 default_system_pte;
291 	/** @dma_offset: DMA offset to add to the PTE. */
292 	u64 dma_offset;
293 	/**
294 	 * @needs_64K: This address range enforces 64K alignment and
295 	 * granularity on VRAM.
296 	 */
297 	bool needs_64K;
298 	/** @clear_pt: clear page table entries during the bind walk */
299 	bool clear_pt;
300 	/**
301 	 * @vma: VMA being mapped
302 	 */
303 	struct xe_vma *vma;
304 
305 	/* Also input, but is updated during the walk */
306 	/** @curs: The DMA address cursor. */
307 	struct xe_res_cursor *curs;
308 	/** @va_curs_start: The Virtual address corresponding to @curs->start */
309 	u64 va_curs_start;
310 
311 	/* Output */
312 	/** @wupd: Walk output data for page-table updates. */
313 	struct xe_walk_update {
314 		/** @wupd.entries: Caller provided storage. */
315 		struct xe_vm_pgtable_update *entries;
316 		/** @wupd.num_used_entries: Number of update @entries used. */
317 		unsigned int num_used_entries;
318 		/** @wupd.updates: Tracks the update entry at a given level */
319 		struct xe_pt_update updates[XE_VM_MAX_LEVEL + 1];
320 	} wupd;
321 
322 	/* Walk state */
323 	/**
324 	 * @l0_end_addr: The end address of the current l0 leaf. Used for
325 	 * 64K granularity detection.
326 	 */
327 	u64 l0_end_addr;
328 	/** @addr_64K: The start address of the current 64K chunk. */
329 	u64 addr_64K;
330 	/** @found_64K: Whether @addr_64K actually points to a 64K chunk. */
331 	bool found_64K;
332 };
333 
334 static int
335 xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
336 		 pgoff_t offset, bool alloc_entries)
337 {
338 	struct xe_pt_update *upd = &wupd->updates[parent->level];
339 	struct xe_vm_pgtable_update *entry;
340 
341 	/*
342 	 * For *each level*, we can only have one active
343 	 * struct xe_pt_update at any one time. Once we move on to a
344 	 * new parent and page-directory, the old one is complete, and
345 	 * updates are either already stored in the build tree or in
346 	 * @wupd->entries
347 	 */
348 	if (likely(upd->parent == parent))
349 		return 0;
350 
351 	upd->parent = parent;
352 	upd->preexisting = true;
353 
354 	if (wupd->num_used_entries == XE_VM_MAX_LEVEL * 2 + 1)
355 		return -EINVAL;
356 
357 	entry = wupd->entries + wupd->num_used_entries++;
358 	upd->update = entry;
359 	entry->ofs = offset;
360 	entry->pt_bo = parent->bo;
361 	entry->pt = parent;
362 	entry->flags = 0;
363 	entry->qwords = 0;
364 	entry->pt_bo->update_index = -1;
365 
366 	if (alloc_entries) {
367 		entry->pt_entries = kmalloc_array(XE_PDES,
368 						  sizeof(*entry->pt_entries),
369 						  GFP_KERNEL);
370 		if (!entry->pt_entries)
371 			return -ENOMEM;
372 	}
373 
374 	return 0;
375 }
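
/*
 * Worked example of the bound checked above: the walk can encounter at
 * most two pre-existing (shared) page-tables per level, one containing
 * the start of the range and one containing the end, except at the root,
 * where both ends live in a single table. With XE_VM_MAX_LEVEL levels
 * below the root that gives at most XE_VM_MAX_LEVEL * 2 + 1 update
 * entries, which is exactly the storage callers provide for
 * @wupd->entries.
 */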
376 
377 /*
378  * NOTE: This is a very frequently called function so we allow ourselves
379  * to annotate (using branch prediction hints) the fastpath of updating a
380  * non-pre-existing pagetable with leaf ptes.
381  */
382 static int
383 xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
384 		   pgoff_t offset, struct xe_pt *xe_child, u64 pte)
385 {
386 	struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level];
387 	struct xe_pt_update *child_upd = xe_child ?
388 		&xe_walk->wupd.updates[xe_child->level] : NULL;
389 	int ret;
390 
391 	ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true);
392 	if (unlikely(ret))
393 		return ret;
394 
395 	/*
396 	 * Register this new pagetable so that it won't be recognized as
397 	 * a shared pagetable by a subsequent insertion.
398 	 */
399 	if (unlikely(child_upd)) {
400 		child_upd->update = NULL;
401 		child_upd->parent = xe_child;
402 		child_upd->preexisting = false;
403 	}
404 
405 	if (likely(!upd->preexisting)) {
406 		/* Continue building a non-connected subtree. */
407 		struct iosys_map *map = &parent->bo->vmap;
408 
409 		if (unlikely(xe_child)) {
410 			parent->base.children[offset] = &xe_child->base;
411 			parent->base.staging[offset] = &xe_child->base;
412 		}
413 
414 		xe_pt_write(xe_walk->vm->xe, map, offset, pte);
415 		parent->num_live++;
416 	} else {
417 		/* Shared pt. Stage update. */
418 		unsigned int idx;
419 		struct xe_vm_pgtable_update *entry = upd->update;
420 
421 		idx = offset - entry->ofs;
422 		entry->pt_entries[idx].pt = xe_child;
423 		entry->pt_entries[idx].pte = pte;
424 		entry->qwords++;
425 	}
426 
427 	return 0;
428 }
429 
430 static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
431 				   struct xe_pt_stage_bind_walk *xe_walk)
432 {
433 	u64 size, dma;
434 
435 	if (level > MAX_HUGEPTE_LEVEL)
436 		return false;
437 
438 	/* Does the virtual range requested cover a huge pte? */
439 	if (!xe_pt_covers(addr, next, level, &xe_walk->base))
440 		return false;
441 
442 	/* Does the DMA segment cover the whole pte? */
443 	if (next - xe_walk->va_curs_start > xe_walk->curs->size)
444 		return false;
445 
446 	/* Null VMAs do not have dma addresses */
447 	if (xe_vma_is_null(xe_walk->vma))
448 		return true;
449 
450 	/* If we are clearing the page table, there are no dma addresses */
451 	if (xe_walk->clear_pt)
452 		return true;
453 
454 	/* Is the DMA address huge PTE size aligned? */
455 	size = next - addr;
456 	dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
457 
458 	return IS_ALIGNED(dma, size);
459 }
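
/*
 * Worked example (level 1, i.e. a 2M huge PTE): addr == 0x0200000 and
 * next == 0x0400000 exactly cover one level-1 entry, so xe_pt_covers()
 * passes. If the DMA segment backing this range is also 2M-aligned at
 * @addr (the IS_ALIGNED(dma, size) check above), the range can be encoded
 * as a single huge PTE instead of a 512-entry level-0 table.
 */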
460 
461 /*
462  * Scan the requested mapping to check whether it can be done entirely
463  * with 64K PTEs.
464  */
465 static bool
466 xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
467 {
468 	struct xe_res_cursor curs = *xe_walk->curs;
469 
470 	if (!IS_ALIGNED(addr, SZ_64K))
471 		return false;
472 
473 	if (next > xe_walk->l0_end_addr)
474 		return false;
475 
476 	/* Null VMAs do not have dma addresses */
477 	if (xe_vma_is_null(xe_walk->vma))
478 		return true;
479 
480 	xe_res_next(&curs, addr - xe_walk->va_curs_start);
481 	for (; addr < next; addr += SZ_64K) {
482 		if (!IS_ALIGNED(xe_res_dma(&curs), SZ_64K) || curs.size < SZ_64K)
483 			return false;
484 
485 		xe_res_next(&curs, SZ_64K);
486 	}
487 
488 	return addr == next;
489 }
490 
491 /*
492  * For non-compact "normal" 4K level-0 pagetables, we want to try to group
493  * addresses together in 64K-contiguous regions to add a 64K TLB hint for the
494  * device to the PTE.
495  * This function determines whether the address is part of such a
496  * segment. For VRAM in normal pagetables, this is strictly necessary on
497  * some devices.
498  */
499 static bool
500 xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk)
501 {
502 	/* Address is within an already found 64k region */
503 	if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K)
504 		return true;
505 
506 	xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk);
507 	xe_walk->addr_64K = addr;
508 
509 	return xe_walk->found_64K;
510 }
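
/*
 * Example of the caching above: for sixteen consecutive 4K PTEs falling
 * inside one 64K-aligned chunk, xe_pt_scan_64K() runs once for the first
 * PTE and, when it succeeds, the remaining fifteen take the cheap
 * early-return path because addr - xe_walk->addr_64K stays below SZ_64K.
 */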
511 
512 static int
513 xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
514 		       unsigned int level, u64 addr, u64 next,
515 		       struct xe_ptw **child,
516 		       enum page_walk_action *action,
517 		       struct xe_pt_walk *walk)
518 {
519 	struct xe_pt_stage_bind_walk *xe_walk =
520 		container_of(walk, typeof(*xe_walk), base);
521 	u16 pat_index = xe_walk->vma->pat_index;
522 	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
523 	struct xe_vm *vm = xe_walk->vm;
524 	struct xe_pt *xe_child;
525 	bool covers;
526 	int ret = 0;
527 	u64 pte;
528 
529 	/* Is this a leaf entry? */
530 	if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
531 		struct xe_res_cursor *curs = xe_walk->curs;
532 		bool is_null = xe_vma_is_null(xe_walk->vma);
533 		bool is_vram = is_null ? false : xe_res_is_vram(curs);
534 
535 		XE_WARN_ON(xe_walk->va_curs_start != addr);
536 
537 		if (xe_walk->clear_pt) {
538 			pte = 0;
539 		} else {
540 			pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
541 							 xe_res_dma(curs) +
542 							 xe_walk->dma_offset,
543 							 xe_walk->vma,
544 							 pat_index, level);
545 			if (!is_null)
546 				pte |= is_vram ? xe_walk->default_vram_pte :
547 					xe_walk->default_system_pte;
548 
549 			/*
550 			 * Set the XE_PTE_PS64 hint if possible, otherwise if
551 			 * this device *requires* 64K PTE size for VRAM, fail.
552 			 */
553 			if (level == 0 && !xe_parent->is_compact) {
554 				if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
555 					xe_walk->vma->gpuva.flags |=
556 							XE_VMA_PTE_64K;
557 					pte |= XE_PTE_PS64;
558 				} else if (XE_WARN_ON(xe_walk->needs_64K &&
559 					   is_vram)) {
560 					return -EINVAL;
561 				}
562 			}
563 		}
564 
565 		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
566 		if (unlikely(ret))
567 			return ret;
568 
569 		if (!is_null && !xe_walk->clear_pt)
570 			xe_res_next(curs, next - addr);
571 		xe_walk->va_curs_start = next;
572 		xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
573 		*action = ACTION_CONTINUE;
574 
575 		return ret;
576 	}
577 
578 	/*
579 	 * Descending to lower level. Determine if we need to allocate a
580 	 * new page table or -directory, which we do if there is no
581 	 * previous one or there is one we can completely replace.
582 	 */
583 	if (level == 1) {
584 		walk->shifts = xe_normal_pt_shifts;
585 		xe_walk->l0_end_addr = next;
586 	}
587 
588 	covers = xe_pt_covers(addr, next, level, &xe_walk->base);
589 	if (covers || !*child) {
590 		u64 flags = 0;
591 
592 		xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
593 		if (IS_ERR(xe_child))
594 			return PTR_ERR(xe_child);
595 
596 		xe_pt_set_addr(xe_child,
597 			       round_down(addr, 1ull << walk->shifts[level]));
598 
599 		if (!covers)
600 			xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child);
601 
602 		*child = &xe_child->base;
603 
604 		/*
605 		 * Prefer the compact pagetable layout for L0 if possible. Only
606 		 * possible if VMA covers entire 2MB region as compact 64k and
607 		 * 4k pages cannot be mixed within a 2MB region.
608 		 * TODO: Suballocate the pt bo to avoid wasting a lot of
609 		 * memory.
610 		 */
611 		if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
612 		    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
613 			walk->shifts = xe_compact_pt_shifts;
614 			xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
615 			flags |= XE_PDE_64K;
616 			xe_child->is_compact = true;
617 		}
618 
619 		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
620 		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
621 					 pte);
622 	}
623 
624 	*action = ACTION_SUBTREE;
625 	return ret;
626 }
627 
628 static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
629 	.pt_entry = xe_pt_stage_bind_entry,
630 };
631 
632 /*
633  * Default atomic expectations for different allocation scenarios are as follows:
634  *
635  * 1. Traditional API: When the VM is not in LR mode:
636  *    - Device atomics are expected to function with all allocations.
637  *
638  * 2. Compute/SVM API: When the VM is in LR mode:
639  *    - Device atomics are the default behavior when the bo is placed in a single region.
640  *    - In all other cases device atomics will be disabled with AE=0 until an application
641  *      requests differently using an ioctl like madvise.
642  */
643 static bool xe_atomic_for_vram(struct xe_vm *vm)
644 {
645 	return true;
646 }
647 
648 static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
649 {
650 	struct xe_device *xe = vm->xe;
651 
652 	if (!xe->info.has_device_atomics_on_smem)
653 		return false;
654 
655 	/*
656 	 * If a SMEM+LMEM allocation is backed by SMEM, a device
657 	 * atomic will cause a gpu page fault and the allocation will
658 	 * then be migrated to LMEM, so bind such allocations with
659 	 * device atomics enabled.
660 	 *
661 	 * TODO: Revisit this. Perhaps add something like a
662 	 * fault_on_atomics_in_system UAPI flag.
663 	 * Note that this also prohibits GPU atomics in LR mode for
664 	 * userptr and system memory on DGFX.
665 	 */
666 	return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
667 				 (bo && xe_bo_has_single_placement(bo))));
668 }
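
/*
 * Decision summary for the two helpers above (AE == the
 * XE_USM_PPGTT_PTE_AE bit, applied by xe_pt_stage_bind() for VMAs with
 * XE_VMA_ATOMIC_PTE_BIT set):
 *
 *  - VRAM-backed PTEs: AE is always set (xe_atomic_for_vram()).
 *  - System-memory PTEs: AE requires has_device_atomics_on_smem and, on
 *    DGFX, additionally either a non-LR VM or a bo with a single
 *    placement; otherwise AE is left clear.
 */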
669 
670 /**
671  * xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
672  * range.
673  * @tile: The tile we're building for.
674  * @vma: The vma indicating the address range.
675  * @range: The range indicating the address range.
676  * @entries: Storage for the update entries used for connecting the tree to
677  * the main tree at commit time.
678  * @num_entries: On output contains the number of @entries used.
679  * @clear_pt: Clear the page table entries.
680  *
681  * This function builds a disconnected page-table tree for a given address
682  * range. The tree is connected to the main vm tree for the gpu using
683  * xe_migrate_update_pgtables() and for the cpu using xe_pt_commit_bind().
684  * The function builds xe_vm_pgtable_update structures for already existing
685  * shared page-tables, and non-existing shared and non-shared page-tables
686  * are built and populated directly.
687  *
688  * Return: 0 on success, negative error code on error.
689  */
690 static int
691 xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
692 		 struct xe_svm_range *range,
693 		 struct xe_vm_pgtable_update *entries,
694 		 u32 *num_entries, bool clear_pt)
695 {
696 	struct xe_device *xe = tile_to_xe(tile);
697 	struct xe_bo *bo = xe_vma_bo(vma);
698 	struct xe_res_cursor curs;
699 	struct xe_vm *vm = xe_vma_vm(vma);
700 	struct xe_pt_stage_bind_walk xe_walk = {
701 		.base = {
702 			.ops = &xe_pt_stage_bind_ops,
703 			.shifts = xe_normal_pt_shifts,
704 			.max_level = XE_PT_HIGHEST_LEVEL,
705 			.staging = true,
706 		},
707 		.vm = vm,
708 		.tile = tile,
709 		.curs = &curs,
710 		.va_curs_start = range ? range->base.itree.start :
711 			xe_vma_start(vma),
712 		.vma = vma,
713 		.wupd.entries = entries,
714 		.clear_pt = clear_pt,
715 	};
716 	struct xe_pt *pt = vm->pt_root[tile->id];
717 	int ret;
718 
719 	if (range) {
720 		/* Move this entire thing to xe_svm.c? */
721 		xe_svm_notifier_lock(vm);
722 		if (!xe_svm_range_pages_valid(range)) {
723 			xe_svm_range_debug(range, "BIND PREPARE - RETRY");
724 			xe_svm_notifier_unlock(vm);
725 			return -EAGAIN;
726 		}
727 		if (xe_svm_range_has_dma_mapping(range)) {
728 			xe_res_first_dma(range->base.dma_addr, 0,
729 					 range->base.itree.last + 1 - range->base.itree.start,
730 					 &curs);
731 			xe_svm_range_debug(range, "BIND PREPARE - MIXED");
732 		} else {
733 			xe_assert(xe, false);
734 		}
735 		/*
736 		 * Note that once we unlock, the resource cursor's dma addresses may
737 		 * become stale, but the bind will be aborted at commit time anyway.
738 		 */
739 		xe_svm_notifier_unlock(vm);
740 	}
741 
742 	xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
743 	if (clear_pt)
744 		goto walk_pt;
745 
746 	if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
747 		xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
748 		xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
749 			XE_USM_PPGTT_PTE_AE : 0;
750 	}
751 
752 	xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
753 	xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
754 	if (!range)
755 		xe_bo_assert_held(bo);
756 
757 	if (!xe_vma_is_null(vma) && !range) {
758 		if (xe_vma_is_userptr(vma))
759 			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
760 					xe_vma_size(vma), &curs);
761 		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
762 			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
763 				     xe_vma_size(vma), &curs);
764 		else
765 			xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
766 					xe_vma_size(vma), &curs);
767 	} else if (!range) {
768 		curs.size = xe_vma_size(vma);
769 	}
770 
771 walk_pt:
772 	ret = xe_pt_walk_range(&pt->base, pt->level,
773 			       range ? range->base.itree.start : xe_vma_start(vma),
774 			       range ? range->base.itree.last + 1 : xe_vma_end(vma),
775 			       &xe_walk.base);
776 
777 	*num_entries = xe_walk.wupd.num_used_entries;
778 	return ret;
779 }
780 
781 /**
782  * xe_pt_nonshared_offsets() - Determine the non-shared entry offsets of a
783  * shared pagetable.
784  * @addr: The start address within the non-shared pagetable.
785  * @end: The end address within the non-shared pagetable.
786  * @level: The level of the non-shared pagetable.
787  * @walk: Walk info. The function adjusts the walk action.
788  * @action: next action to perform (see enum page_walk_action)
789  * @offset: Ignored on input, First non-shared entry on output.
790  * @end_offset: Ignored on input, Last non-shared entry + 1 on output.
791  *
792  * A non-shared page-table has some entries that belong to the address range
793  * and others that don't. This function determines the entries that belong
794  * fully to the address range. Depending on level, some entries may
795  * partially belong to the address range (that can't happen at level 0).
796  * The function detects that and adjusts those offsets to not include those
797  * partial entries. Iff it does detect partial entries, we know that there must
798  * be shared page tables also at lower levels, so it adjusts the walk action
799  * accordingly.
800  *
801  * Return: true if there were non-shared entries, false otherwise.
802  */
803 static bool xe_pt_nonshared_offsets(u64 addr, u64 end, unsigned int level,
804 				    struct xe_pt_walk *walk,
805 				    enum page_walk_action *action,
806 				    pgoff_t *offset, pgoff_t *end_offset)
807 {
808 	u64 size = 1ull << walk->shifts[level];
809 
810 	*offset = xe_pt_offset(addr, level, walk);
811 	*end_offset = xe_pt_num_entries(addr, end, level, walk) + *offset;
812 
813 	if (!level)
814 		return true;
815 
816 	/*
817 	 * If addr or next are not size aligned, there are shared pts at lower
818 	 * level, so in that case traverse down the subtree
819 	 */
820 	*action = ACTION_CONTINUE;
821 	if (!IS_ALIGNED(addr, size)) {
822 		*action = ACTION_SUBTREE;
823 		(*offset)++;
824 	}
825 
826 	if (!IS_ALIGNED(end, size)) {
827 		*action = ACTION_SUBTREE;
828 		(*end_offset)--;
829 	}
830 
831 	return *end_offset > *offset;
832 }
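
/*
 * Worked example at level 1 (2M per entry): for addr == 0x2100000 and
 * end == 0x8000000, addr is not 2M-aligned, so the first, partially
 * covered entry is skipped ((*offset)++) and *action becomes
 * ACTION_SUBTREE so that level 0 handles the partial 2M chunk. end is
 * 2M-aligned, so *end_offset is untouched, and all entries in
 * [*offset, *end_offset) belong wholly to the range.
 */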
833 
834 struct xe_pt_zap_ptes_walk {
835 	/** @base: The walk base-class */
836 	struct xe_pt_walk base;
837 
838 	/* Input parameters for the walk */
839 	/** @tile: The tile we're building for */
840 	struct xe_tile *tile;
841 
842 	/* Output */
843 	/** @needs_invalidate: Whether we need to invalidate TLB*/
844 	bool needs_invalidate;
845 };
846 
847 static int xe_pt_zap_ptes_entry(struct xe_ptw *parent, pgoff_t offset,
848 				unsigned int level, u64 addr, u64 next,
849 				struct xe_ptw **child,
850 				enum page_walk_action *action,
851 				struct xe_pt_walk *walk)
852 {
853 	struct xe_pt_zap_ptes_walk *xe_walk =
854 		container_of(walk, typeof(*xe_walk), base);
855 	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
856 	pgoff_t end_offset;
857 
858 	XE_WARN_ON(!*child);
859 	XE_WARN_ON(!level);
860 
861 	/*
862 	 * Note that we're called from an entry callback, and we're dealing
863 	 * with the child of that entry rather than the parent, so need to
864 	 * adjust level down.
865 	 */
866 	if (xe_pt_nonshared_offsets(addr, next, --level, walk, action, &offset,
867 				    &end_offset)) {
868 		xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap,
869 			      offset * sizeof(u64), 0,
870 			      (end_offset - offset) * sizeof(u64));
871 		xe_walk->needs_invalidate = true;
872 	}
873 
874 	return 0;
875 }
876 
877 static const struct xe_pt_walk_ops xe_pt_zap_ptes_ops = {
878 	.pt_entry = xe_pt_zap_ptes_entry,
879 };
880 
881 /**
882  * xe_pt_zap_ptes() - Zap (zero) gpu ptes of an address range
883  * @tile: The tile we're zapping for.
884  * @vma: GPU VMA detailing address range.
885  *
886  * Eviction and userptr invalidation need to be able to zap the
887  * gpu ptes of a given address range in pagefaulting mode.
888  * In order to do that, this function needs access to the shared
889  * page-table entries so it can either clear the leaf PTEs or
890  * clear the pointers to lower-level page-tables. The caller is required
891  * to hold the necessary locks to ensure neither the page-table connectivity
892  * nor the page-table entries of the range are updated from under us.
893  *
894  * Return: Whether ptes were actually updated and a TLB invalidation is
895  * required.
896  */
897 bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
898 {
899 	struct xe_pt_zap_ptes_walk xe_walk = {
900 		.base = {
901 			.ops = &xe_pt_zap_ptes_ops,
902 			.shifts = xe_normal_pt_shifts,
903 			.max_level = XE_PT_HIGHEST_LEVEL,
904 		},
905 		.tile = tile,
906 	};
907 	struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
908 	u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);
909 
910 	if (xe_vma_bo(vma))
911 		xe_bo_assert_held(xe_vma_bo(vma));
912 	else if (xe_vma_is_userptr(vma))
913 		lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
914 
915 	if (!(pt_mask & BIT(tile->id)))
916 		return false;
917 
918 	(void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
919 				xe_vma_end(vma), &xe_walk.base);
920 
921 	return xe_walk.needs_invalidate;
922 }
923 
924 /**
925  * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range
926  * @tile: The tile we're zapping for.
927  * @vm: The VM we're zapping for.
928  * @range: The SVM range we're zapping for.
929  *
930  * SVM invalidation needs to be able to zap the gpu ptes of a given address
931  * range. In order to be able to do that, that function needs access to the
932  * shared page-table entries so it can either clear the leaf PTEs or
933  * clear the pointers to lower-level page-tables. The caller is required
934  * to hold the SVM notifier lock.
935  *
936  * Return: Whether ptes were actually updated and a TLB invalidation is
937  * required.
938  */
939 bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
940 			  struct xe_svm_range *range)
941 {
942 	struct xe_pt_zap_ptes_walk xe_walk = {
943 		.base = {
944 			.ops = &xe_pt_zap_ptes_ops,
945 			.shifts = xe_normal_pt_shifts,
946 			.max_level = XE_PT_HIGHEST_LEVEL,
947 		},
948 		.tile = tile,
949 	};
950 	struct xe_pt *pt = vm->pt_root[tile->id];
951 	u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
952 
953 	xe_svm_assert_in_notifier(vm);
954 
955 	if (!(pt_mask & BIT(tile->id)))
956 		return false;
957 
958 	(void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
959 				range->base.itree.last + 1, &xe_walk.base);
960 
961 	return xe_walk.needs_invalidate;
962 }
963 
964 static void
965 xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
966 		       struct iosys_map *map, void *data,
967 		       u32 qword_ofs, u32 num_qwords,
968 		       const struct xe_vm_pgtable_update *update)
969 {
970 	struct xe_pt_entry *ptes = update->pt_entries;
971 	u64 *ptr = data;
972 	u32 i;
973 
974 	for (i = 0; i < num_qwords; i++) {
975 		if (map)
976 			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
977 				  sizeof(u64), u64, ptes[i].pte);
978 		else
979 			ptr[i] = ptes[i].pte;
980 	}
981 }
982 
983 static void xe_pt_cancel_bind(struct xe_vma *vma,
984 			      struct xe_vm_pgtable_update *entries,
985 			      u32 num_entries)
986 {
987 	u32 i, j;
988 
989 	for (i = 0; i < num_entries; i++) {
990 		struct xe_pt *pt = entries[i].pt;
991 
992 		if (!pt)
993 			continue;
994 
995 		if (pt->level) {
996 			for (j = 0; j < entries[i].qwords; j++)
997 				xe_pt_destroy(entries[i].pt_entries[j].pt,
998 					      xe_vma_vm(vma)->flags, NULL);
999 		}
1000 
1001 		kfree(entries[i].pt_entries);
1002 		entries[i].pt_entries = NULL;
1003 		entries[i].qwords = 0;
1004 	}
1005 }
1006 
1007 #define XE_INVALID_VMA	((struct xe_vma *)(0xdeaddeadull))
1008 
1009 static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
1010 {
1011 	struct xe_vm *vm;
1012 
1013 	if (vma == XE_INVALID_VMA)
1014 		return;
1015 
1016 	vm = xe_vma_vm(vma);
1017 	lockdep_assert_held(&vm->lock);
1018 
1019 	if (!xe_vma_has_no_bo(vma))
1020 		dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
1021 
1022 	xe_vm_assert_held(vm);
1023 }
1024 
1025 static void xe_pt_commit_locks_assert(struct xe_vma *vma)
1026 {
1027 	struct xe_vm *vm;
1028 
1029 	if (vma == XE_INVALID_VMA)
1030 		return;
1031 
1032 	vm = xe_vma_vm(vma);
1033 	xe_pt_commit_prepare_locks_assert(vma);
1034 
1035 	if (xe_vma_is_userptr(vma))
1036 		lockdep_assert_held_read(&vm->userptr.notifier_lock);
1037 }
1038 
1039 static void xe_pt_commit(struct xe_vma *vma,
1040 			 struct xe_vm_pgtable_update *entries,
1041 			 u32 num_entries, struct llist_head *deferred)
1042 {
1043 	u32 i, j;
1044 
1045 	xe_pt_commit_locks_assert(vma);
1046 
1047 	for (i = 0; i < num_entries; i++) {
1048 		struct xe_pt *pt = entries[i].pt;
1049 		struct xe_pt_dir *pt_dir;
1050 
1051 		if (!pt->level)
1052 			continue;
1053 
1054 		pt_dir = as_xe_pt_dir(pt);
1055 		for (j = 0; j < entries[i].qwords; j++) {
1056 			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
1057 			int j_ = j + entries[i].ofs;
1058 
1059 			pt_dir->children[j_] = pt_dir->staging[j_];
1060 			xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
1061 				      xe_vma_vm(vma)->flags, deferred);
1062 		}
1063 	}
1064 }
1065 
1066 static void xe_pt_abort_bind(struct xe_vma *vma,
1067 			     struct xe_vm_pgtable_update *entries,
1068 			     u32 num_entries, bool rebind)
1069 {
1070 	int i, j;
1071 
1072 	xe_pt_commit_prepare_locks_assert(vma);
1073 
1074 	for (i = num_entries - 1; i >= 0; --i) {
1075 		struct xe_pt *pt = entries[i].pt;
1076 		struct xe_pt_dir *pt_dir;
1077 
1078 		if (!rebind)
1079 			pt->num_live -= entries[i].qwords;
1080 
1081 		if (!pt->level)
1082 			continue;
1083 
1084 		pt_dir = as_xe_pt_dir(pt);
1085 		for (j = 0; j < entries[i].qwords; j++) {
1086 			u32 j_ = j + entries[i].ofs;
1087 			struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
1088 			struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
1089 
1090 			pt_dir->staging[j_] = oldpte ? &oldpte->base : NULL;
1091 			xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
1092 		}
1093 	}
1094 }
1095 
1096 static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
1097 				      struct xe_vm_pgtable_update *entries,
1098 				      u32 num_entries, bool rebind)
1099 {
1100 	u32 i, j;
1101 
1102 	xe_pt_commit_prepare_locks_assert(vma);
1103 
1104 	for (i = 0; i < num_entries; i++) {
1105 		struct xe_pt *pt = entries[i].pt;
1106 		struct xe_pt_dir *pt_dir;
1107 
1108 		if (!rebind)
1109 			pt->num_live += entries[i].qwords;
1110 
1111 		if (!pt->level)
1112 			continue;
1113 
1114 		pt_dir = as_xe_pt_dir(pt);
1115 		for (j = 0; j < entries[i].qwords; j++) {
1116 			u32 j_ = j + entries[i].ofs;
1117 			struct xe_pt *newpte = entries[i].pt_entries[j].pt;
1118 			struct xe_pt *oldpte = NULL;
1119 
1120 			if (xe_pt_entry_staging(pt_dir, j_))
1121 				oldpte = xe_pt_entry_staging(pt_dir, j_);
1122 
1123 			pt_dir->staging[j_] = &newpte->base;
1124 			entries[i].pt_entries[j].pt = oldpte;
1125 		}
1126 	}
1127 }
1128 
1129 static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
1130 			    u32 num_entries)
1131 {
1132 	u32 i;
1133 
1134 	for (i = 0; i < num_entries; i++)
1135 		kfree(entries[i].pt_entries);
1136 }
1137 
1138 static int
1139 xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
1140 		   struct xe_svm_range *range,
1141 		   struct xe_vm_pgtable_update *entries,
1142 		   u32 *num_entries, bool invalidate_on_bind)
1143 {
1144 	int err;
1145 
1146 	*num_entries = 0;
1147 	err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
1148 			       invalidate_on_bind);
1149 	if (!err)
1150 		xe_tile_assert(tile, *num_entries);
1151 
1152 	return err;
1153 }
1154 
1155 static void xe_vm_dbg_print_entries(struct xe_device *xe,
1156 				    const struct xe_vm_pgtable_update *entries,
1157 				    unsigned int num_entries, bool bind)
1158 #if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
1159 {
1160 	unsigned int i;
1161 
1162 	vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
1163 	       num_entries);
1164 	for (i = 0; i < num_entries; i++) {
1165 		const struct xe_vm_pgtable_update *entry = &entries[i];
1166 		struct xe_pt *xe_pt = entry->pt;
1167 		u64 page_size = 1ull << xe_pt_shift(xe_pt->level);
1168 		u64 end;
1169 		u64 start;
1170 
1171 		xe_assert(xe, !entry->pt->is_compact);
1172 		start = entry->ofs * page_size;
1173 		end = start + page_size * entry->qwords;
1174 		vm_dbg(&xe->drm,
1175 		       "\t%u: Update level %u at (%u + %u) [%llx...%llx) f:%x\n",
1176 		       i, xe_pt->level, entry->ofs, entry->qwords,
1177 		       xe_pt_addr(xe_pt) + start, xe_pt_addr(xe_pt) + end, 0);
1178 	}
1179 }
1180 #else
1181 {}
1182 #endif
1183 
1184 static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
1185 {
1186 	int i;
1187 
1188 	for (i = 0; i < num_syncs; i++) {
1189 		struct dma_fence *fence = syncs[i].fence;
1190 
1191 		if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
1192 				       &fence->flags))
1193 			return false;
1194 	}
1195 
1196 	return true;
1197 }
1198 
1199 static int job_test_add_deps(struct xe_sched_job *job,
1200 			     struct dma_resv *resv,
1201 			     enum dma_resv_usage usage)
1202 {
1203 	if (!job) {
1204 		if (!dma_resv_test_signaled(resv, usage))
1205 			return -ETIME;
1206 
1207 		return 0;
1208 	}
1209 
1210 	return xe_sched_job_add_deps(job, resv, usage);
1211 }
1212 
1213 static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
1214 {
1215 	struct xe_bo *bo = xe_vma_bo(vma);
1216 
1217 	xe_bo_assert_held(bo);
1218 
1219 	if (bo && !bo->vm)
1220 		return job_test_add_deps(job, bo->ttm.base.resv,
1221 					 DMA_RESV_USAGE_KERNEL);
1222 
1223 	return 0;
1224 }
1225 
1226 static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
1227 		       struct xe_sched_job *job)
1228 {
1229 	int err = 0;
1230 
1231 	/*
1232 	 * No need to check for is_cpu_addr_mirror here as vma_add_deps is a
1233 	 * NOP if VMA is_cpu_addr_mirror
1234 	 */
1235 
1236 	switch (op->base.op) {
1237 	case DRM_GPUVA_OP_MAP:
1238 		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
1239 			break;
1240 
1241 		err = vma_add_deps(op->map.vma, job);
1242 		break;
1243 	case DRM_GPUVA_OP_REMAP:
1244 		if (op->remap.prev)
1245 			err = vma_add_deps(op->remap.prev, job);
1246 		if (!err && op->remap.next)
1247 			err = vma_add_deps(op->remap.next, job);
1248 		break;
1249 	case DRM_GPUVA_OP_UNMAP:
1250 		break;
1251 	case DRM_GPUVA_OP_PREFETCH:
1252 		err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
1253 		break;
1254 	case DRM_GPUVA_OP_DRIVER:
1255 		break;
1256 	default:
1257 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1258 	}
1259 
1260 	return err;
1261 }
1262 
1263 static int xe_pt_vm_dependencies(struct xe_sched_job *job,
1264 				 struct xe_vm *vm,
1265 				 struct xe_vma_ops *vops,
1266 				 struct xe_vm_pgtable_update_ops *pt_update_ops,
1267 				 struct xe_range_fence_tree *rftree)
1268 {
1269 	struct xe_range_fence *rtfence;
1270 	struct dma_fence *fence;
1271 	struct xe_vma_op *op;
1272 	int err = 0, i;
1273 
1274 	xe_vm_assert_held(vm);
1275 
1276 	if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
1277 		return -ETIME;
1278 
1279 	if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
1280 		return -ETIME;
1281 
1282 	if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
1283 		err = job_test_add_deps(job, xe_vm_resv(vm),
1284 					pt_update_ops->wait_vm_bookkeep ?
1285 					DMA_RESV_USAGE_BOOKKEEP :
1286 					DMA_RESV_USAGE_KERNEL);
1287 		if (err)
1288 			return err;
1289 	}
1290 
1291 	rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
1292 					    pt_update_ops->last);
1293 	while (rtfence) {
1294 		fence = rtfence->fence;
1295 
1296 		if (!dma_fence_is_signaled(fence)) {
1297 			/*
1298 			 * Is this a CPU update? GPU is busy updating, so return
1299 			 * an error
1300 			 */
1301 			if (!job)
1302 				return -ETIME;
1303 
1304 			dma_fence_get(fence);
1305 			err = drm_sched_job_add_dependency(&job->drm, fence);
1306 			if (err)
1307 				return err;
1308 		}
1309 
1310 		rtfence = xe_range_fence_tree_next(rtfence,
1311 						   pt_update_ops->start,
1312 						   pt_update_ops->last);
1313 	}
1314 
1315 	list_for_each_entry(op, &vops->list, link) {
1316 		err = op_add_deps(vm, op, job);
1317 		if (err)
1318 			return err;
1319 	}
1320 
1321 	if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
1322 		if (job)
1323 			err = xe_sched_job_last_fence_add_dep(job, vm);
1324 		else
1325 			err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
1326 	}
1327 
1328 	for (i = 0; job && !err && i < vops->num_syncs; i++)
1329 		err = xe_sync_entry_add_deps(&vops->syncs[i], job);
1330 
1331 	return err;
1332 }
1333 
1334 static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
1335 {
1336 	struct xe_vma_ops *vops = pt_update->vops;
1337 	struct xe_vm *vm = vops->vm;
1338 	struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
1339 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1340 		&vops->pt_update_ops[pt_update->tile_id];
1341 
1342 	return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
1343 				     pt_update_ops, rftree);
1344 }
1345 
1346 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
1347 
1348 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
1349 {
1350 	u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
1351 	static u32 count;
1352 
1353 	if (count++ % divisor == divisor - 1) {
1354 		uvma->userptr.divisor = divisor << 1;
1355 		return true;
1356 	}
1357 
1358 	return false;
1359 }
1360 
1361 #else
1362 
1363 static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
1364 {
1365 	return false;
1366 }
1367 
1368 #endif
1369 
1370 static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
1371 			     struct xe_vm_pgtable_update_ops *pt_update)
1372 {
1373 	struct xe_userptr_vma *uvma;
1374 	unsigned long notifier_seq;
1375 
1376 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
1377 
1378 	if (!xe_vma_is_userptr(vma))
1379 		return 0;
1380 
1381 	uvma = to_userptr_vma(vma);
1382 	if (xe_pt_userptr_inject_eagain(uvma))
1383 		xe_vma_userptr_force_invalidate(uvma);
1384 
1385 	notifier_seq = uvma->userptr.notifier_seq;
1386 
1387 	if (!mmu_interval_read_retry(&uvma->userptr.notifier,
1388 				     notifier_seq))
1389 		return 0;
1390 
1391 	if (xe_vm_in_fault_mode(vm))
1392 		return -EAGAIN;
1393 
1394 	/*
1395 	 * Just continue the operation since exec or rebind worker
1396 	 * will take care of rebinding.
1397 	 */
1398 	return 0;
1399 }
1400 
1401 static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
1402 			    struct xe_vm_pgtable_update_ops *pt_update)
1403 {
1404 	int err = 0;
1405 
1406 	lockdep_assert_held_read(&vm->userptr.notifier_lock);
1407 
1408 	switch (op->base.op) {
1409 	case DRM_GPUVA_OP_MAP:
1410 		if (!op->map.immediate && xe_vm_in_fault_mode(vm))
1411 			break;
1412 
1413 		err = vma_check_userptr(vm, op->map.vma, pt_update);
1414 		break;
1415 	case DRM_GPUVA_OP_REMAP:
1416 		if (op->remap.prev)
1417 			err = vma_check_userptr(vm, op->remap.prev, pt_update);
1418 		if (!err && op->remap.next)
1419 			err = vma_check_userptr(vm, op->remap.next, pt_update);
1420 		break;
1421 	case DRM_GPUVA_OP_UNMAP:
1422 		break;
1423 	case DRM_GPUVA_OP_PREFETCH:
1424 		err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
1425 					pt_update);
1426 		break;
1427 	default:
1428 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
1429 	}
1430 
1431 	return err;
1432 }
1433 
1434 static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
1435 {
1436 	struct xe_vm *vm = pt_update->vops->vm;
1437 	struct xe_vma_ops *vops = pt_update->vops;
1438 	struct xe_vm_pgtable_update_ops *pt_update_ops =
1439 		&vops->pt_update_ops[pt_update->tile_id];
1440 	struct xe_vma_op *op;
1441 	int err;
1442 
1443 	err = xe_pt_pre_commit(pt_update);
1444 	if (err)
1445 		return err;
1446 
1447 	down_read(&vm->userptr.notifier_lock);
1448 
1449 	list_for_each_entry(op, &vops->list, link) {
1450 		err = op_check_userptr(vm, op, pt_update_ops);
1451 		if (err) {
1452 			up_read(&vm->userptr.notifier_lock);
1453 			break;
1454 		}
1455 	}
1456 
1457 	return err;
1458 }
1459 
1460 #if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
1461 static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
1462 {
1463 	struct xe_vm *vm = pt_update->vops->vm;
1464 	struct xe_vma_ops *vops = pt_update->vops;
1465 	struct xe_vma_op *op;
1466 	unsigned long i;
1467 	int err;
1468 
1469 	err = xe_pt_pre_commit(pt_update);
1470 	if (err)
1471 		return err;
1472 
1473 	xe_svm_notifier_lock(vm);
1474 
1475 	list_for_each_entry(op, &vops->list, link) {
1476 		struct xe_svm_range *range = NULL;
1477 
1478 		if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
1479 			continue;
1480 
1481 		if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
1482 			xe_assert(vm->xe,
1483 				  xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
1484 			xa_for_each(&op->prefetch_range.range, i, range) {
1485 				xe_svm_range_debug(range, "PRE-COMMIT");
1486 
1487 				if (!xe_svm_range_pages_valid(range)) {
1488 					xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
1489 					xe_svm_notifier_unlock(vm);
1490 					return -ENODATA;
1491 				}
1492 			}
1493 		} else {
1494 			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
1495 			xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
1496 			range = op->map_range.range;
1497 
1498 			xe_svm_range_debug(range, "PRE-COMMIT");
1499 
1500 			if (!xe_svm_range_pages_valid(range)) {
1501 				xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
1502 				xe_svm_notifier_unlock(vm);
1503 				return -EAGAIN;
1504 			}
1505 		}
1506 	}
1507 
1508 	return 0;
1509 }
1510 #endif
1511 
1512 struct invalidation_fence {
1513 	struct xe_gt_tlb_invalidation_fence base;
1514 	struct xe_gt *gt;
1515 	struct dma_fence *fence;
1516 	struct dma_fence_cb cb;
1517 	struct work_struct work;
1518 	u64 start;
1519 	u64 end;
1520 	u32 asid;
1521 };
1522 
1523 static void invalidation_fence_cb(struct dma_fence *fence,
1524 				  struct dma_fence_cb *cb)
1525 {
1526 	struct invalidation_fence *ifence =
1527 		container_of(cb, struct invalidation_fence, cb);
1528 	struct xe_device *xe = gt_to_xe(ifence->gt);
1529 
1530 	trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
1531 	if (!ifence->fence->error) {
1532 		queue_work(system_wq, &ifence->work);
1533 	} else {
1534 		ifence->base.base.error = ifence->fence->error;
1535 		xe_gt_tlb_invalidation_fence_signal(&ifence->base);
1536 	}
1537 	dma_fence_put(ifence->fence);
1538 }
1539 
1540 static void invalidation_fence_work_func(struct work_struct *w)
1541 {
1542 	struct invalidation_fence *ifence =
1543 		container_of(w, struct invalidation_fence, work);
1544 	struct xe_device *xe = gt_to_xe(ifence->gt);
1545 
1546 	trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
1547 	xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
1548 				     ifence->end, ifence->asid);
1549 }
1550 
1551 static void invalidation_fence_init(struct xe_gt *gt,
1552 				    struct invalidation_fence *ifence,
1553 				    struct dma_fence *fence,
1554 				    u64 start, u64 end, u32 asid)
1555 {
1556 	int ret;
1557 
1558 	trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
1559 
1560 	xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
1561 
1562 	ifence->fence = fence;
1563 	ifence->gt = gt;
1564 	ifence->start = start;
1565 	ifence->end = end;
1566 	ifence->asid = asid;
1567 
1568 	INIT_WORK(&ifence->work, invalidation_fence_work_func);
1569 	ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
1570 	if (ret == -ENOENT) {
1571 		dma_fence_put(ifence->fence);	/* Usually dropped in CB */
1572 		invalidation_fence_work_func(&ifence->work);
1573 	} else if (ret) {
1574 		dma_fence_put(&ifence->base.base);	/* Caller ref */
1575 		dma_fence_put(&ifence->base.base);	/* Creation ref */
1576 	}
1577 
1578 	xe_gt_assert(gt, !ret || ret == -ENOENT);
1579 }
1580 
1581 struct xe_pt_stage_unbind_walk {
1582 	/** @base: The pagewalk base-class. */
1583 	struct xe_pt_walk base;
1584 
1585 	/* Input parameters for the walk */
1586 	/** @tile: The tile we're unbinding from. */
1587 	struct xe_tile *tile;
1588 
1589 	/**
1590 	 * @modified_start: Walk range start, modified to include any
1591 	 * shared pagetables that we're the only user of and can thus
1592 	 * treat as private.
1593 	 */
1594 	u64 modified_start;
1595 	/** @modified_end: Walk range end, modified like @modified_start. */
1596 	u64 modified_end;
1597 
1598 	/* Output */
1599 	/** @wupd: Structure to track the page-table updates we're building */
1600 	struct xe_walk_update wupd;
1601 };
1602 
1603 /*
1604  * Check whether this range is the only one populating this pagetable,
1605  * and in that case, update the walk range checks so that higher levels don't
1606  * view us as a shared pagetable.
1607  */
1608 static bool xe_pt_check_kill(u64 addr, u64 next, unsigned int level,
1609 			     const struct xe_pt *child,
1610 			     enum page_walk_action *action,
1611 			     struct xe_pt_walk *walk)
1612 {
1613 	struct xe_pt_stage_unbind_walk *xe_walk =
1614 		container_of(walk, typeof(*xe_walk), base);
1615 	unsigned int shift = walk->shifts[level];
1616 	u64 size = 1ull << shift;
1617 
1618 	if (IS_ALIGNED(addr, size) && IS_ALIGNED(next, size) &&
1619 	    ((next - addr) >> shift) == child->num_live) {
1620 		u64 size = 1ull << walk->shifts[level + 1];
1621 
1622 		*action = ACTION_CONTINUE;
1623 
1624 		if (xe_walk->modified_start >= addr)
1625 			xe_walk->modified_start = round_down(addr, size);
1626 		if (xe_walk->modified_end <= next)
1627 			xe_walk->modified_end = round_up(next, size);
1628 
1629 		return true;
1630 	}
1631 
1632 	return false;
1633 }
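
/*
 * Worked example: a level-0 table with num_live == 512 whose entire 2M
 * span falls inside the unbind range satisfies
 * ((next - addr) >> 12) == child->num_live, so the table is killed as a
 * whole, and modified_start/modified_end are widened to the enclosing
 * level-1 entry so that the parent no longer appears shared.
 */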
1634 
1635 static int xe_pt_stage_unbind_entry(struct xe_ptw *parent, pgoff_t offset,
1636 				    unsigned int level, u64 addr, u64 next,
1637 				    struct xe_ptw **child,
1638 				    enum page_walk_action *action,
1639 				    struct xe_pt_walk *walk)
1640 {
1641 	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
1642 
1643 	XE_WARN_ON(!*child);
1644 	XE_WARN_ON(!level);
1645 
1646 	xe_pt_check_kill(addr, next, level - 1, xe_child, action, walk);
1647 
1648 	return 0;
1649 }
1650 
1651 static int
1652 xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
1653 				unsigned int level, u64 addr, u64 next,
1654 				struct xe_ptw **child,
1655 				enum page_walk_action *action,
1656 				struct xe_pt_walk *walk)
1657 {
1658 	struct xe_pt_stage_unbind_walk *xe_walk =
1659 		container_of(walk, typeof(*xe_walk), base);
1660 	struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
1661 	pgoff_t end_offset;
1662 	u64 size = 1ull << walk->shifts[--level];
1663 	int err;
1664 
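	/*
	 * An unaligned boundary means xe_pt_check_kill() claimed the
	 * neighbouring shared pagetable as private; widen to the modified
	 * range so it is torn down rather than partially updated.
	 */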
1665 	if (!IS_ALIGNED(addr, size))
1666 		addr = xe_walk->modified_start;
1667 	if (!IS_ALIGNED(next, size))
1668 		next = xe_walk->modified_end;
1669 
1670 	/* Parent == *child is the root pt. Don't kill it. */
1671 	if (parent != *child &&
1672 	    xe_pt_check_kill(addr, next, level, xe_child, action, walk))
1673 		return 0;
1674 
1675 	if (!xe_pt_nonshared_offsets(addr, next, level, walk, action, &offset,
1676 				     &end_offset))
1677 		return 0;
1678 
1679 	err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
1680 	if (err)
1681 		return err;
1682 
1683 	xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
1684 
1685 	return 0;
1686 }
1687 
1688 static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
1689 	.pt_entry = xe_pt_stage_unbind_entry,
1690 	.pt_post_descend = xe_pt_stage_unbind_post_descend,
1691 };
1692 
1693 /**
1694  * xe_pt_stage_unbind() - Build page-table update structures for an unbind
1695  * operation
1696  * @tile: The tile we're unbinding for.
1697  * @vm: The vm.
1698  * @vma: The vma we're unbinding.
1699  * @range: The range we're unbinding.
1700  * @entries: Caller-provided storage for the update structures.
1701  *
1702  * Builds page-table update structures for an unbind operation. The function
1703  * will attempt to remove all page-tables that we're the only user
1704  * of, and for that to work, the unbind operation must be committed in the
1705  * same critical section that blocks racing binds to the same page-table tree.
1706  *
1707  * Return: The number of entries used.
1708  */
1709 static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
1710 				       struct xe_vm *vm,
1711 				       struct xe_vma *vma,
1712 				       struct xe_svm_range *range,
1713 				       struct xe_vm_pgtable_update *entries)
1714 {
1715 	u64 start = range ? range->base.itree.start : xe_vma_start(vma);
1716 	u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
1717 	struct xe_pt_stage_unbind_walk xe_walk = {
1718 		.base = {
1719 			.ops = &xe_pt_stage_unbind_ops,
1720 			.shifts = xe_normal_pt_shifts,
1721 			.max_level = XE_PT_HIGHEST_LEVEL,
1722 			.staging = true,
1723 		},
1724 		.tile = tile,
1725 		.modified_start = start,
1726 		.modified_end = end,
1727 		.wupd.entries = entries,
1728 	};
1729 	struct xe_pt *pt = vm->pt_root[tile->id];
1730 
1731 	(void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
1732 				&xe_walk.base);
1733 
1734 	return xe_walk.wupd.num_used_entries;
1735 }
1736 
1737 static void
1738 xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
1739 				  struct xe_tile *tile, struct iosys_map *map,
1740 				  void *ptr, u32 qword_ofs, u32 num_qwords,
1741 				  const struct xe_vm_pgtable_update *update)
1742 {
1743 	struct xe_vm *vm = pt_update->vops->vm;
1744 	u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
1745 	int i;
1746 
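	/*
	 * Clear to the empty/scratch PTE via whichever access path is
	 * available: qword writes for an I/O mapping, memset64() for a CPU
	 * mapping, or directly through @ptr when no map is provided.
	 */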
1747 	if (map && map->is_iomem)
1748 		for (i = 0; i < num_qwords; ++i)
1749 			xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) *
1750 				  sizeof(u64), u64, empty);
1751 	else if (map)
1752 		memset64(map->vaddr + qword_ofs * sizeof(u64), empty,
1753 			 num_qwords);
1754 	else
1755 		memset64(ptr, empty, num_qwords);
1756 }
1757 
1758 static void xe_pt_abort_unbind(struct xe_vma *vma,
1759 			       struct xe_vm_pgtable_update *entries,
1760 			       u32 num_entries)
1761 {
1762 	int i, j;
1763 
1764 	xe_pt_commit_prepare_locks_assert(vma);
1765 
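	/*
	 * Unwind in reverse order, restoring each pagetable's num_live
	 * count and the staging pointers cleared by
	 * xe_pt_commit_prepare_unbind().
	 */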
1766 	for (i = num_entries - 1; i >= 0; --i) {
1767 		struct xe_vm_pgtable_update *entry = &entries[i];
1768 		struct xe_pt *pt = entry->pt;
1769 		struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
1770 
1771 		pt->num_live += entry->qwords;
1772 
1773 		if (!pt->level)
1774 			continue;
1775 
1776 		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
1777 			pt_dir->staging[j] =
1778 				entries[i].pt_entries[j - entry->ofs].pt ?
1779 				&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
1780 	}
1781 }
1782 
1783 static void
1784 xe_pt_commit_prepare_unbind(struct xe_vma *vma,
1785 			    struct xe_vm_pgtable_update *entries,
1786 			    u32 num_entries)
1787 {
1788 	int i, j;
1789 
1790 	xe_pt_commit_prepare_locks_assert(vma);
1791 
1792 	for (i = 0; i < num_entries; ++i) {
1793 		struct xe_vm_pgtable_update *entry = &entries[i];
1794 		struct xe_pt *pt = entry->pt;
1795 		struct xe_pt_dir *pt_dir;
1796 
1797 		pt->num_live -= entry->qwords;
1798 		if (!pt->level)
1799 			continue;
1800 
1801 		pt_dir = as_xe_pt_dir(pt);
1802 		for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
1803 			entry->pt_entries[j - entry->ofs].pt =
1804 				xe_pt_entry_staging(pt_dir, j);
1805 			pt_dir->staging[j] = NULL;
1806 		}
1807 	}
1808 }
1809 
1810 static void
1811 xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
1812 				 u64 start, u64 end)
1813 {
1814 	u64 last;
1815 	u32 current_op = pt_update_ops->current_op;
1816 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1817 	int i, level = 0;
1818 
1819 	for (i = 0; i < pt_op->num_entries; i++) {
1820 		const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
1821 
1822 		if (entry->pt->level > level)
1823 			level = entry->pt->level;
1824 	}
1825 
1826 	/* Greedy (non-optimal) calculation but simple */
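	/*
	 * Example: unbinding 64K that removes a level-1 (2M) pagetable
	 * widens the interval to the enclosing 2M boundaries.
	 */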
1827 	start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level));
1828 	last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1;
1829 
1830 	if (start < pt_update_ops->start)
1831 		pt_update_ops->start = start;
1832 	if (last > pt_update_ops->last)
1833 		pt_update_ops->last = last;
1834 }
1835 
1836 static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
1837 {
1838 	int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
1839 
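	/*
	 * Reserve one fence slot per tile, doubled when a media GT exists
	 * since the primary and media GTs may each add an invalidation
	 * fence.
	 */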
1840 	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
1841 		return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
1842 					       xe->info.tile_count << shift);
1843 
1844 	return 0;
1845 }
1846 
1847 static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
1848 			   struct xe_vm_pgtable_update_ops *pt_update_ops,
1849 			   struct xe_vma *vma, bool invalidate_on_bind)
1850 {
1851 	u32 current_op = pt_update_ops->current_op;
1852 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1853 	int err;
1854 
1855 	xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
1856 	xe_bo_assert_held(xe_vma_bo(vma));
1857 
1858 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
1859 	       "Preparing bind, with range [%llx...%llx)\n",
1860 	       xe_vma_start(vma), xe_vma_end(vma) - 1);
1861 
1862 	pt_op->vma = NULL;
1863 	pt_op->bind = true;
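	/* A bind is a rebind if this tile already has the VMA present. */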
1864 	pt_op->rebind = BIT(tile->id) & vma->tile_present;
1865 
1866 	err = vma_reserve_fences(tile_to_xe(tile), vma);
1867 	if (err)
1868 		return err;
1869 
1870 	err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
1871 				 &pt_op->num_entries, invalidate_on_bind);
1872 	if (!err) {
1873 		xe_tile_assert(tile, pt_op->num_entries <=
1874 			       ARRAY_SIZE(pt_op->entries));
1875 		xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
1876 					pt_op->num_entries, true);
1877 
1878 		xe_pt_update_ops_rfence_interval(pt_update_ops,
1879 						 xe_vma_start(vma),
1880 						 xe_vma_end(vma));
1881 		++pt_update_ops->current_op;
1882 		pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
1883 
1884 		/*
1885 		 * If rebind, we have to invalidate TLB on !LR vms to invalidate
1886 		 * cached PTEs pointing to freed memory. On LR vms this is done
1887 		 * automatically when the context is re-enabled by the rebind worker,
1888 		 * or in fault mode it was invalidated on PTE zapping.
1889 		 *
1890 		 * If !rebind, and scratch enabled VMs, there is a chance the scratch
1891 		 * If !rebind on a scratch-enabled VM, there is a chance the scratch
1892 		 * On !LR VMs this is done in the ring ops preceding a batch, but on
1893 		 * LR, in particular on user-space batch buffer chaining, it needs to
1894 		 * be done here.
1895 		 */
1896 		if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
1897 		     xe_vm_in_lr_mode(vm)))
1898 			pt_update_ops->needs_invalidation = true;
1899 		else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
1900 			/* We also bump if batch_invalidate_tlb is true */
1901 			vm->tlb_flush_seqno++;
1902 
1903 		vma->tile_staged |= BIT(tile->id);
1904 		pt_op->vma = vma;
1905 		xe_pt_commit_prepare_bind(vma, pt_op->entries,
1906 					  pt_op->num_entries, pt_op->rebind);
1907 	} else {
1908 		xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
1909 	}
1910 
1911 	return err;
1912 }
1913 
1914 static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
1915 			      struct xe_vm_pgtable_update_ops *pt_update_ops,
1916 			      struct xe_vma *vma, struct xe_svm_range *range)
1917 {
1918 	u32 current_op = pt_update_ops->current_op;
1919 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1920 	int err;
1921 
1922 	xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma));
1923 
1924 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
1925 	       "Preparing bind, with range [%lx...%lx)\n",
1926 	       range->base.itree.start, range->base.itree.last);
1927 
1928 	pt_op->vma = NULL;
1929 	pt_op->bind = true;
1930 	pt_op->rebind = BIT(tile->id) & range->tile_present;
1931 
1932 	err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
1933 				 &pt_op->num_entries, false);
1934 	if (!err) {
1935 		xe_tile_assert(tile, pt_op->num_entries <=
1936 			       ARRAY_SIZE(pt_op->entries));
1937 		xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
1938 					pt_op->num_entries, true);
1939 
1940 		xe_pt_update_ops_rfence_interval(pt_update_ops,
1941 						 range->base.itree.start,
1942 						 range->base.itree.last + 1);
1943 		++pt_update_ops->current_op;
1944 		pt_update_ops->needs_svm_lock = true;
1945 
1946 		pt_op->vma = vma;
1947 		xe_pt_commit_prepare_bind(vma, pt_op->entries,
1948 					  pt_op->num_entries, pt_op->rebind);
1949 	} else {
1950 		xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
1951 	}
1952 
1953 	return err;
1954 }
1955 
1956 static int unbind_op_prepare(struct xe_tile *tile,
1957 			     struct xe_vm_pgtable_update_ops *pt_update_ops,
1958 			     struct xe_vma *vma)
1959 {
1960 	u32 current_op = pt_update_ops->current_op;
1961 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
1962 	int err;
1963 
1964 	if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
1965 		return 0;
1966 
1967 	xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
1968 	xe_bo_assert_held(xe_vma_bo(vma));
1969 
1970 	vm_dbg(&xe_vma_vm(vma)->xe->drm,
1971 	       "Preparing unbind, with range [%llx...%llx)\n",
1972 	       xe_vma_start(vma), xe_vma_end(vma) - 1);
1973 
1974 	pt_op->vma = vma;
1975 	pt_op->bind = false;
1976 	pt_op->rebind = false;
1977 
1978 	err = vma_reserve_fences(tile_to_xe(tile), vma);
1979 	if (err)
1980 		return err;
1981 
1982 	pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
1983 						vma, NULL, pt_op->entries);
1984 
1985 	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
1986 				pt_op->num_entries, false);
1987 	xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
1988 					 xe_vma_end(vma));
1989 	++pt_update_ops->current_op;
1990 	pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
1991 	pt_update_ops->needs_invalidation = true;
1992 
1993 	xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
1994 
1995 	return 0;
1996 }
1997 
1998 static bool
1999 xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op,
2000 				       struct xe_svm_range *range)
2001 {
2002 	struct xe_vm_pgtable_update *update = pt_op->entries;
2003 
2004 	XE_WARN_ON(!pt_op->num_entries);
2005 
2006 	/*
2007 	 * We can't skip the invalidation if we are removing PTEs that span
2008 	 * more than the range; check that the PTEs being removed are confined
2009 	 * to the range and thus already invalid.
2010 	 */
2011 
2012 	if (pt_op->num_entries > 1)
2013 		return false;
2014 
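	/*
	 * A single level-0 entry clears only 4K PTEs inside the range. A
	 * level-1 entry clears 2M per PTE, which stays within the range
	 * only if the range itself is at least 2M; anything higher may
	 * span beyond it.
	 */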
2015 	if (update->pt->level == 0)
2016 		return true;
2017 
2018 	if (update->pt->level == 1)
2019 		return xe_svm_range_size(range) >= SZ_2M;
2020 
2021 	return false;
2022 }
2023 
2024 static int unbind_range_prepare(struct xe_vm *vm,
2025 				struct xe_tile *tile,
2026 				struct xe_vm_pgtable_update_ops *pt_update_ops,
2027 				struct xe_svm_range *range)
2028 {
2029 	u32 current_op = pt_update_ops->current_op;
2030 	struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
2031 
2032 	if (!(range->tile_present & BIT(tile->id)))
2033 		return 0;
2034 
2035 	vm_dbg(&vm->xe->drm,
2036 	       "Preparing unbind, with range [%lx...%lx)\n",
2037 	       range->base.itree.start, range->base.itree.last);
2038 
2039 	pt_op->vma = XE_INVALID_VMA;
2040 	pt_op->bind = false;
2041 	pt_op->rebind = false;
2042 
2043 	pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
2044 						pt_op->entries);
2045 
2046 	xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
2047 				pt_op->num_entries, false);
2048 	xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
2049 					 range->base.itree.last + 1);
2050 	++pt_update_ops->current_op;
2051 	pt_update_ops->needs_svm_lock = true;
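	/*
	 * Invalidation may be skipped only when scratch pages aren't in
	 * use, the range no longer has a valid GPU mapping, and the staged
	 * unbind provably removes nothing beyond the range.
	 */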
2052 	pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
2053 		xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
2054 					    range->tile_invalidated) ||
2055 		!xe_pt_op_check_range_skip_invalidation(pt_op, range);
2056 
2057 	xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
2058 				    pt_op->num_entries);
2059 
2060 	return 0;
2061 }
2062 
2063 static int op_prepare(struct xe_vm *vm,
2064 		      struct xe_tile *tile,
2065 		      struct xe_vm_pgtable_update_ops *pt_update_ops,
2066 		      struct xe_vma_op *op)
2067 {
2068 	int err = 0;
2069 
2070 	xe_vm_assert_held(vm);
2071 
2072 	switch (op->base.op) {
2073 	case DRM_GPUVA_OP_MAP:
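		/*
		 * A non-immediate bind on a fault-mode VM is deferred to
		 * page-fault handling (unless it must invalidate on bind),
		 * and CPU-address-mirror VMAs have no PTEs to bind here.
		 */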
2074 		if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
2075 		     !op->map.invalidate_on_bind) ||
2076 		    op->map.is_cpu_addr_mirror)
2077 			break;
2078 
2079 		err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
2080 				      op->map.invalidate_on_bind);
2081 		pt_update_ops->wait_vm_kernel = true;
2082 		break;
2083 	case DRM_GPUVA_OP_REMAP:
2084 	{
2085 		struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
2086 
2087 		if (xe_vma_is_cpu_addr_mirror(old))
2088 			break;
2089 
2090 		err = unbind_op_prepare(tile, pt_update_ops, old);
2091 
2092 		if (!err && op->remap.prev) {
2093 			err = bind_op_prepare(vm, tile, pt_update_ops,
2094 					      op->remap.prev, false);
2095 			pt_update_ops->wait_vm_bookkeep = true;
2096 		}
2097 		if (!err && op->remap.next) {
2098 			err = bind_op_prepare(vm, tile, pt_update_ops,
2099 					      op->remap.next, false);
2100 			pt_update_ops->wait_vm_bookkeep = true;
2101 		}
2102 		break;
2103 	}
2104 	case DRM_GPUVA_OP_UNMAP:
2105 	{
2106 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2107 
2108 		if (xe_vma_is_cpu_addr_mirror(vma))
2109 			break;
2110 
2111 		err = unbind_op_prepare(tile, pt_update_ops, vma);
2112 		break;
2113 	}
2114 	case DRM_GPUVA_OP_PREFETCH:
2115 	{
2116 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2117 
2118 		if (xe_vma_is_cpu_addr_mirror(vma)) {
2119 			struct xe_svm_range *range;
2120 			unsigned long i;
2121 
2122 			xa_for_each(&op->prefetch_range.range, i, range) {
2123 				err = bind_range_prepare(vm, tile, pt_update_ops,
2124 							 vma, range);
2125 				if (err)
2126 					return err;
2127 			}
2128 		} else {
2129 			err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
2130 			pt_update_ops->wait_vm_kernel = true;
2131 		}
2132 		break;
2133 	}
2134 	case DRM_GPUVA_OP_DRIVER:
2135 		if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
2136 			xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
2137 
2138 			err = bind_range_prepare(vm, tile, pt_update_ops,
2139 						 op->map_range.vma,
2140 						 op->map_range.range);
2141 		} else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
2142 			err = unbind_range_prepare(vm, tile, pt_update_ops,
2143 						   op->unmap_range.range);
2144 		}
2145 		break;
2146 	default:
2147 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2148 	}
2149 
2150 	return err;
2151 }
2152 
2153 static void
2154 xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
2155 {
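	/* Start with an empty (inverted) range; op preparation widens it. */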
2156 	init_llist_head(&pt_update_ops->deferred);
2157 	pt_update_ops->start = ~0x0ull;
2158 	pt_update_ops->last = 0x0ull;
2159 }
2160 
2161 /**
2162  * xe_pt_update_ops_prepare() - Prepare PT update operations
2163  * @tile: Tile of PT update operations
2164  * @vops: VMA operations
2165  *
2166  * Prepare PT update operations, which includes updating internal PT state,
2167  * allocating memory for page tables, populating the page tables being pruned
2168  * in, and creating PT update operations for leaf insertion / removal.
2169  *
2170  * Return: 0 on success, negative error code on error.
2171  */
2172 int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
2173 {
2174 	struct xe_vm_pgtable_update_ops *pt_update_ops =
2175 		&vops->pt_update_ops[tile->id];
2176 	struct xe_vma_op *op;
2177 	int shift = tile->media_gt ? 1 : 0;
2178 	int err;
2179 
2180 	lockdep_assert_held(&vops->vm->lock);
2181 	xe_vm_assert_held(vops->vm);
2182 
2183 	xe_pt_update_ops_init(pt_update_ops);
2184 
2185 	err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
2186 				      tile_to_xe(tile)->info.tile_count << shift);
2187 	if (err)
2188 		return err;
2189 
2190 	list_for_each_entry(op, &vops->list, link) {
2191 		err = op_prepare(vops->vm, tile, pt_update_ops, op);
2192 
2193 		if (err)
2194 			return err;
2195 	}
2196 
2197 	xe_tile_assert(tile, pt_update_ops->current_op <=
2198 		       pt_update_ops->num_ops);
2199 
2200 #ifdef TEST_VM_OPS_ERROR
2201 	if (vops->inject_error &&
2202 	    vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
2203 		return -ENOSPC;
2204 #endif
2205 
2206 	return 0;
2207 }
2208 ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
2209 
2210 static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
2211 			   struct xe_vm_pgtable_update_ops *pt_update_ops,
2212 			   struct xe_vma *vma, struct dma_fence *fence,
2213 			   struct dma_fence *fence2, bool invalidate_on_bind)
2214 {
2215 	xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
2216 
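	/*
	 * A BO with no vm pointer does not share the VM's reservation
	 * object, so the bind fence(s) must be added to the BO's own resv.
	 */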
2217 	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
2218 		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
2219 				   pt_update_ops->wait_vm_bookkeep ?
2220 				   DMA_RESV_USAGE_KERNEL :
2221 				   DMA_RESV_USAGE_BOOKKEEP);
2222 		if (fence2)
2223 			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
2224 					   pt_update_ops->wait_vm_bookkeep ?
2225 					   DMA_RESV_USAGE_KERNEL :
2226 					   DMA_RESV_USAGE_BOOKKEEP);
2227 	}
2228 	/* All WRITE_ONCEs pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
2229 	WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
2230 	if (invalidate_on_bind)
2231 		WRITE_ONCE(vma->tile_invalidated,
2232 			   vma->tile_invalidated | BIT(tile->id));
2233 	else
2234 		WRITE_ONCE(vma->tile_invalidated,
2235 			   vma->tile_invalidated & ~BIT(tile->id));
2236 	vma->tile_staged &= ~BIT(tile->id);
2237 	if (xe_vma_is_userptr(vma)) {
2238 		lockdep_assert_held_read(&vm->userptr.notifier_lock);
2239 		to_userptr_vma(vma)->userptr.initial_bind = true;
2240 	}
2241 
2242 	/*
2243 	 * Kick the rebind worker if this bind triggers preempt fences and we
2244 	 * are not already in the rebind worker.
2245 	 */
2246 	if (pt_update_ops->wait_vm_bookkeep &&
2247 	    xe_vm_in_preempt_fence_mode(vm) &&
2248 	    !current->mm)
2249 		xe_vm_queue_rebind_worker(vm);
2250 }
2251 
2252 static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
2253 			     struct xe_vm_pgtable_update_ops *pt_update_ops,
2254 			     struct xe_vma *vma, struct dma_fence *fence,
2255 			     struct dma_fence *fence2)
2256 {
2257 	xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
2258 
2259 	if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
2260 		dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
2261 				   pt_update_ops->wait_vm_bookkeep ?
2262 				   DMA_RESV_USAGE_KERNEL :
2263 				   DMA_RESV_USAGE_BOOKKEEP);
2264 		if (fence2)
2265 			dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
2266 					   pt_update_ops->wait_vm_bookkeep ?
2267 					   DMA_RESV_USAGE_KERNEL :
2268 					   DMA_RESV_USAGE_BOOKKEEP);
2269 	}
2270 	vma->tile_present &= ~BIT(tile->id);
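	/*
	 * Once no tile has this VMA mapped, drop it from the rebind list
	 * and from userptr invalidation tracking.
	 */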
2271 	if (!vma->tile_present) {
2272 		list_del_init(&vma->combined_links.rebind);
2273 		if (xe_vma_is_userptr(vma)) {
2274 			lockdep_assert_held_read(&vm->userptr.notifier_lock);
2275 
2276 			spin_lock(&vm->userptr.invalidated_lock);
2277 			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
2278 			spin_unlock(&vm->userptr.invalidated_lock);
2279 		}
2280 	}
2281 }
2282 
2283 static void range_present_and_invalidated_tile(struct xe_vm *vm,
2284 					       struct xe_svm_range *range,
2285 					       u8 tile_id)
2286 {
2287 	/* All WRITE_ONCEs pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
2288 
2289 	lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
2290 
2291 	WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
2292 	WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
2293 }
2294 
2295 static void op_commit(struct xe_vm *vm,
2296 		      struct xe_tile *tile,
2297 		      struct xe_vm_pgtable_update_ops *pt_update_ops,
2298 		      struct xe_vma_op *op, struct dma_fence *fence,
2299 		      struct dma_fence *fence2)
2300 {
2301 	xe_vm_assert_held(vm);
2302 
2303 	switch (op->base.op) {
2304 	case DRM_GPUVA_OP_MAP:
2305 		if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
2306 		    op->map.is_cpu_addr_mirror)
2307 			break;
2308 
2309 		bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
2310 			       fence2, op->map.invalidate_on_bind);
2311 		break;
2312 	case DRM_GPUVA_OP_REMAP:
2313 	{
2314 		struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
2315 
2316 		if (xe_vma_is_cpu_addr_mirror(old))
2317 			break;
2318 
2319 		unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2);
2320 
2321 		if (op->remap.prev)
2322 			bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
2323 				       fence, fence2, false);
2324 		if (op->remap.next)
2325 			bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
2326 				       fence, fence2, false);
2327 		break;
2328 	}
2329 	case DRM_GPUVA_OP_UNMAP:
2330 	{
2331 		struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
2332 
2333 		if (!xe_vma_is_cpu_addr_mirror(vma))
2334 			unbind_op_commit(vm, tile, pt_update_ops, vma, fence,
2335 					 fence2);
2336 		break;
2337 	}
2338 	case DRM_GPUVA_OP_PREFETCH:
2339 	{
2340 		struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
2341 
2342 		if (xe_vma_is_cpu_addr_mirror(vma)) {
2343 			struct xe_svm_range *range = NULL;
2344 			unsigned long i;
2345 
2346 			xa_for_each(&op->prefetch_range.range, i, range)
2347 				range_present_and_invalidated_tile(vm, range, tile->id);
2348 		} else {
2349 			bind_op_commit(vm, tile, pt_update_ops, vma, fence,
2350 				       fence2, false);
2351 		}
2352 		break;
2353 	}
2354 	case DRM_GPUVA_OP_DRIVER:
2355 	{
2356 		/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
2357 		if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
2358 			range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
2359 		else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
2360 			WRITE_ONCE(op->unmap_range.range->tile_present,
2361 				   op->unmap_range.range->tile_present &
2362 				   ~BIT(tile->id));
2363 
2364 		break;
2365 	}
2366 	default:
2367 		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
2368 	}
2369 }
2370 
2371 static const struct xe_migrate_pt_update_ops migrate_ops = {
2372 	.populate = xe_vm_populate_pgtable,
2373 	.clear = xe_migrate_clear_pgtable_callback,
2374 	.pre_commit = xe_pt_pre_commit,
2375 };
2376 
2377 static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
2378 	.populate = xe_vm_populate_pgtable,
2379 	.clear = xe_migrate_clear_pgtable_callback,
2380 	.pre_commit = xe_pt_userptr_pre_commit,
2381 };
2382 
2383 #if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
2384 static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
2385 	.populate = xe_vm_populate_pgtable,
2386 	.clear = xe_migrate_clear_pgtable_callback,
2387 	.pre_commit = xe_pt_svm_pre_commit,
2388 };
2389 #else
2390 static const struct xe_migrate_pt_update_ops svm_migrate_ops;
2391 #endif
2392 
2393 /**
2394  * xe_pt_update_ops_run() - Run PT update operations
2395  * @tile: Tile of PT update operations
2396  * @vops: VMA operations
2397  *
2398  * Run PT update operations, which includes committing internal PT state
2399  * changes, creating a job for the leaf insertion / removal updates, and
2400  * installing the job fence in various places.
2401  *
2402  * Return: fence on success, negative ERR_PTR on error.
2403  */
2404 struct dma_fence *
2405 xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
2406 {
2407 	struct xe_vm *vm = vops->vm;
2408 	struct xe_vm_pgtable_update_ops *pt_update_ops =
2409 		&vops->pt_update_ops[tile->id];
2410 	struct dma_fence *fence;
2411 	struct invalidation_fence *ifence = NULL, *mfence = NULL;
2412 	struct dma_fence **fences = NULL;
2413 	struct dma_fence_array *cf = NULL;
2414 	struct xe_range_fence *rfence;
2415 	struct xe_vma_op *op;
2416 	int err = 0, i;
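	/*
	 * Pick the pre-commit hook matching the lock this update needs:
	 * SVM notifier lock, userptr notifier lock, or none.
	 */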
2417 	struct xe_migrate_pt_update update = {
2418 		.ops = pt_update_ops->needs_svm_lock ?
2419 			&svm_migrate_ops :
2420 			pt_update_ops->needs_userptr_lock ?
2421 			&userptr_migrate_ops :
2422 			&migrate_ops,
2423 		.vops = vops,
2424 		.tile_id = tile->id,
2425 	};
2426 
2427 	lockdep_assert_held(&vm->lock);
2428 	xe_vm_assert_held(vm);
2429 
2430 	if (!pt_update_ops->current_op) {
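	/*
	 * No staged operations is only expected in fault mode, where binds
	 * may be deferred entirely to page faults; nothing to run.
	 */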
2431 		xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
2432 
2433 		return dma_fence_get_stub();
2434 	}
2435 
2436 #ifdef TEST_VM_OPS_ERROR
2437 	if (vops->inject_error &&
2438 	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
2439 		return ERR_PTR(-ENOSPC);
2440 #endif
2441 
2442 	if (pt_update_ops->needs_invalidation) {
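		/*
		 * One invalidation fence per GT: @ifence for the primary GT
		 * and, when present, @mfence for the media GT, joined by a
		 * dma_fence_array so callers see a single fence.
		 */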
2443 		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
2444 		if (!ifence) {
2445 			err = -ENOMEM;
2446 			goto kill_vm_tile1;
2447 		}
2448 		if (tile->media_gt) {
2449 			mfence = kzalloc(sizeof(*mfence), GFP_KERNEL);
2450 			if (!mfence) {
2451 				err = -ENOMEM;
2452 				goto free_ifence;
2453 			}
2454 			fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
2455 			if (!fences) {
2456 				err = -ENOMEM;
2457 				goto free_ifence;
2458 			}
2459 			cf = dma_fence_array_alloc(2);
2460 			if (!cf) {
2461 				err = -ENOMEM;
2462 				goto free_ifence;
2463 			}
2464 		}
2465 	}
2466 
2467 	rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
2468 	if (!rfence) {
2469 		err = -ENOMEM;
2470 		goto free_ifence;
2471 	}
2472 
2473 	fence = xe_migrate_update_pgtables(tile->migrate, &update);
2474 	if (IS_ERR(fence)) {
2475 		err = PTR_ERR(fence);
2476 		goto free_rfence;
2477 	}
2478 
2479 	/* Point of no return - the VM is killed on any failure after this */
2480 	for (i = 0; i < pt_update_ops->current_op; ++i) {
2481 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
2482 
2483 		xe_pt_commit(pt_op->vma, pt_op->entries,
2484 			     pt_op->num_entries, &pt_update_ops->deferred);
2485 		pt_op->vma = NULL;	/* skip in xe_pt_update_ops_abort */
2486 	}
2487 
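	/*
	 * Track this in-flight update in the VM's range-fence tree; if
	 * insertion fails, wait for the update to complete so tracking it
	 * becomes unnecessary.
	 */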
2488 	if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
2489 				  &xe_range_fence_kfree_ops,
2490 				  pt_update_ops->start,
2491 				  pt_update_ops->last, fence))
2492 		dma_fence_wait(fence, false);
2493 
2494 	/* TLB invalidation must be done before signaling rebind */
2495 	if (ifence) {
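		/*
		 * Each invalidation fence consumes a reference on @fence
		 * (dropped from its callback/worker), so take an extra
		 * reference when the media GT invalidation also uses it.
		 */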
2496 		if (mfence)
2497 			dma_fence_get(fence);
2498 		invalidation_fence_init(tile->primary_gt, ifence, fence,
2499 					pt_update_ops->start,
2500 					pt_update_ops->last, vm->usm.asid);
2501 		if (mfence) {
2502 			invalidation_fence_init(tile->media_gt, mfence, fence,
2503 						pt_update_ops->start,
2504 						pt_update_ops->last, vm->usm.asid);
2505 			fences[0] = &ifence->base.base;
2506 			fences[1] = &mfence->base.base;
2507 			dma_fence_array_init(cf, 2, fences,
2508 					     vm->composite_fence_ctx,
2509 					     vm->composite_fence_seqno++,
2510 					     false);
2511 			fence = &cf->base;
2512 		} else {
2513 			fence = &ifence->base.base;
2514 		}
2515 	}
2516 
2517 	if (!mfence) {
2518 		dma_resv_add_fence(xe_vm_resv(vm), fence,
2519 				   pt_update_ops->wait_vm_bookkeep ?
2520 				   DMA_RESV_USAGE_KERNEL :
2521 				   DMA_RESV_USAGE_BOOKKEEP);
2522 
2523 		list_for_each_entry(op, &vops->list, link)
2524 			op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
2525 	} else {
2526 		dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
2527 				   pt_update_ops->wait_vm_bookkeep ?
2528 				   DMA_RESV_USAGE_KERNEL :
2529 				   DMA_RESV_USAGE_BOOKKEEP);
2530 
2531 		dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
2532 				   pt_update_ops->wait_vm_bookkeep ?
2533 				   DMA_RESV_USAGE_KERNEL :
2534 				   DMA_RESV_USAGE_BOOKKEEP);
2535 
2536 		list_for_each_entry(op, &vops->list, link)
2537 			op_commit(vops->vm, tile, pt_update_ops, op,
2538 				  &ifence->base.base, &mfence->base.base);
2539 	}
2540 
2541 	if (pt_update_ops->needs_svm_lock)
2542 		xe_svm_notifier_unlock(vm);
2543 	if (pt_update_ops->needs_userptr_lock)
2544 		up_read(&vm->userptr.notifier_lock);
2545 
2546 	return fence;
2547 
2548 free_rfence:
2549 	kfree(rfence);
2550 free_ifence:
2551 	kfree(cf);
2552 	kfree(fences);
2553 	kfree(mfence);
2554 	kfree(ifence);
2555 kill_vm_tile1:
2556 	if (err != -EAGAIN && err != -ENODATA && tile->id)
2557 		xe_vm_kill(vops->vm, false);
2558 
2559 	return ERR_PTR(err);
2560 }
2561 ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);
2562 
2563 /**
2564  * xe_pt_update_ops_fini() - Finish PT update operations
2565  * @tile: Tile of PT update operations
2566  * @vops: VMA operations
2567  *
2568  * Finish PT update operations by committing to destroy page table memory
2569  * Finish PT update operations by committing the destruction of page-table memory.
2570 void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
2571 {
2572 	struct xe_vm_pgtable_update_ops *pt_update_ops =
2573 		&vops->pt_update_ops[tile->id];
2574 	int i;
2575 
2576 	lockdep_assert_held(&vops->vm->lock);
2577 	xe_vm_assert_held(vops->vm);
2578 
2579 	for (i = 0; i < pt_update_ops->current_op; ++i) {
2580 		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
2581 
2582 		xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
2583 	}
2584 	xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
2585 }
2586 
2587 /**
2588  * xe_pt_update_ops_abort() - Abort PT update operations
2589  * @tile: Tile of PT update operations
2590  * @vops: VMA operations
2591  *
2592  * Abort PT update operations by unwinding internal PT state.
2593  */
2594 void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
2595 {
2596 	struct xe_vm_pgtable_update_ops *pt_update_ops =
2597 		&vops->pt_update_ops[tile->id];
2598 	int i;
2599 
2600 	lockdep_assert_held(&vops->vm->lock);
2601 	xe_vm_assert_held(vops->vm);
2602 
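	/*
	 * Unwind prepared ops in reverse; ops already committed by
	 * xe_pt_update_ops_run() have vma == NULL and are skipped.
	 */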
2603 	for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
2604 		struct xe_vm_pgtable_update_op *pt_op =
2605 			&pt_update_ops->ops[i];
2606 
2607 		if (!pt_op->vma || i >= pt_update_ops->current_op)
2608 			continue;
2609 
2610 		if (pt_op->bind)
2611 			xe_pt_abort_bind(pt_op->vma, pt_op->entries,
2612 					 pt_op->num_entries,
2613 					 pt_op->rebind);
2614 		else
2615 			xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
2616 					   pt_op->num_entries);
2617 	}
2618 
2619 	xe_pt_update_ops_fini(tile, vops);
2620 }
2621