/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_vram_region;
struct xe_tile;
struct xe_vm;
struct xe_vma;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into the VM's garbage collect SVM
	 * range list. Protected by the VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of bindings present for this range.
	 * Protected by the GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of bindings invalidated for this
	 * range. Protected by the GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
};
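
/*
 * Illustrative sketch (the helper name is hypothetical, not part of this
 * header): code holding a struct drm_gpusvm_range can recover the enclosing
 * struct xe_svm_range via the embedded @base member with container_of(), e.g.
 *
 *	static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
 *	{
 *		return container_of(r, struct xe_svm_range, base);
 *	}
 */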

#if IS_ENABLED(CONFIG_DRM_GPUSVM)
/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

void xe_svm_flush(struct xe_vm *vm);

#else
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_tile *tile, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

#endif

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__)	\
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__)	\
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
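
/*
 * Illustrative sketch (hypothetical caller, not from this header): range
 * state such as @tile_present and the DMA-mapping flag is protected by the
 * GPU SVM notifier lock, so a reader would typically bracket the access:
 *
 *	xe_svm_notifier_lock(vm);
 *	if (xe_svm_range_has_dma_mapping(range))
 *		valid = xe_svm_range_pages_valid(range);
 *	xe_svm_notifier_unlock(vm);
 */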

#endif