/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#include <linux/lockdep.h>

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into the VM's garbage collector list
	 * of SVM ranges. Protected by the VM's garbage collector lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Mask of tiles on which a binding for this range is
	 * present. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Mask of tiles on which the binding for this
	 * range has been invalidated. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM. Protected by the GPU fault
	 * handler locking.
	 */
	u8 skip_migrate :1;
};
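
/*
 * Illustrative sketch (hypothetical helper, not part of this header's API):
 * the two masks above are typically combined to decide whether a range is
 * still validly bound on a set of tiles. A binding only counts if it is
 * present and has not since been invalidated.
 */
static inline bool xe_svm_range_bound_example(struct xe_svm_range *range,
					      u8 tile_mask)
{
	/* Both masks are protected by the GPU SVM notifier lock. */
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);

	return ((range->tile_present & ~range->tile_invalidated) &
		tile_mask) == tile_mask;
}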

#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}
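
/*
 * Example call site (sketch; the retry policy shown is an assumption, not
 * something this header mandates): a bind path could use the check above to
 * bail out and replay the fault when the pages backing @range were unmapped
 * between page collection and bind:
 *
 *	if (!xe_svm_range_pages_valid(range))
 *		return -EAGAIN;
 */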

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic);

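/*
 * Example call site (sketch): the GPU fault handler is expected to resolve
 * the faulting address to an SVM-managed @vma and then hand the fault off
 * here, e.g.:
 *
 *	err = xe_svm_handle_pagefault(vm, vma, tile, fault_addr,
 *				      access_is_atomic);
 *
 * where access_is_atomic (hypothetical name) reflects whether the fault was
 * raised by an atomic access.
 */
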
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

void xe_svm_flush(struct xe_vm *vm);

#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_GPUSVM) */

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)

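/*
 * Usage sketch: the lock/unlock pair above guards reads of
 * notifier-protected range state from a hypothetical caller, e.g.:
 *
 *	xe_svm_notifier_lock(vm);
 *	valid = xe_svm_range_has_dma_mapping(range);
 *	xe_svm_notifier_unlock(vm);
 *
 * xe_svm_assert_in_notifier(), by contrast, documents code that must run
 * from within the GPU SVM invalidation notifier, where the notifier lock is
 * held in write mode.
 */
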
#endif /* _XE_SVM_H_ */