/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif
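
/*
 * Illustrative sketch only: KASAN_SHADOW_SCALE_SHIFT and KASAN_SHADOW_OFFSET
 * come from the architecture's <asm/kasan.h>. The scale shift is typically 3
 * for the generic mode (one shadow byte per 8-byte granule) and 4 for the
 * software tag-based mode (one shadow byte per 16-byte granule), so for the
 * generic mode the mapping above behaves roughly like:
 *
 *	shadow = (void *)((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET;
 */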

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
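
/*
 * Illustrative sketch only (dump_object() is a hypothetical helper): code
 * that must deliberately read memory it knows may be poisoned can suppress
 * KASAN reports for the current task around the access:
 *
 *	kasan_disable_current();
 *	dump_object(ptr);
 *	kasan_enable_current();
 */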

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}
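
/*
 * Illustrative sketch of the intended pairing (slab-internal use only; cache
 * and object are placeholders): a constructor runs on a briefly unpoisoned
 * object, which is then repoisoned until it is actually allocated:
 *
 *	kasan_unpoison_new_object(cache, object);
 *	cache->ctor(object);
 *	kasan_poison_new_object(cache, object);
 */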

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}
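
/*
 * Illustrative sketch only (my_cache_push()/my_cache_pop() are hypothetical
 * helpers, not kernel APIs): a subsystem caching order-0 pages for reuse
 * might pair the two calls above like this.
 *
 * When stashing a page instead of freeing it back to page_alloc:
 *
 *	if (kasan_mempool_poison_pages(page, 0))
 *		my_cache_push(cache, page);
 *
 * When handing a cached page back out:
 *
 *	page = my_cache_pop(cache);
 *	kasan_mempool_unpoison_pages(page, 0);
 */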

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}
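
/*
 * Illustrative sketch only (the pool helpers are hypothetical): a pool that
 * caches kmalloc(128, ...) buffers might pair the two calls above like this.
 *
 * On the "free into the pool" path:
 *
 *	if (kasan_mempool_poison_object(element))
 *		add_to_pool(pool, element);
 *
 * On the "allocate from the pool" path:
 *
 *	element = take_from_pool(pool);
 *	kasan_mempool_unpoison_object(element, 128);
 */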

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
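
/*
 * Illustrative use only: code that is about to access an object outside of
 * compiler instrumentation (ksize()-style helpers, for example) can probe
 * the first byte and bail out once KASAN has reported the bad access:
 *
 *	if (unlikely(!kasan_check_byte(object)))
 *		return 0;
 */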

#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}

static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
slab_flags_t kasan_never_merge(void);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
						bool in_object)
{
	return 0;
}
/* And thus nothing prevents cache merging. */
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						unsigned long size,
						kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* _LINUX_KASAN_H */