Lines Matching +full:page +full:- +full:size
1 /* SPDX-License-Identifier: GPL-2.0 */
6 #include <linux/kasan-enabled.h>
7 #include <linux/kasan-tags.h>
13 struct page;
32 #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
69 int kasan_add_zero_shadow(void *start, unsigned long size);
70 void kasan_remove_zero_shadow(void *start, unsigned long size);
80 static inline int kasan_add_zero_shadow(void *start, unsigned long size) in kasan_add_zero_shadow() argument
85 unsigned long size) in kasan_remove_zero_shadow() argument
105 void __kasan_unpoison_range(const void *addr, size_t size);
106 static __always_inline void kasan_unpoison_range(const void *addr, size_t size) in kasan_unpoison_range() argument
109 __kasan_unpoison_range(addr, size); in kasan_unpoison_range()
112 void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
113 static __always_inline void kasan_poison_pages(struct page *page, in kasan_poison_pages() argument
117 __kasan_poison_pages(page, order, init); in kasan_poison_pages()
120 bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
121 static __always_inline bool kasan_unpoison_pages(struct page *page, in kasan_unpoison_pages() argument
125 return __kasan_unpoison_pages(page, order, init); in kasan_unpoison_pages()
138 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
156 * kasan_poison_new_object - Repoison a new slab object.
184 * kasan_slab_pre_free - Check whether freeing a slab object is safe.
188 * check for double-free and invalid-free bugs and report them.
205 * kasan_slab_free - Poison, initialize, and quarantine a slab object.
255 size_t size, gfp_t flags);
257 const void *object, size_t size, gfp_t flags) in kasan_kmalloc() argument
260 return __kasan_kmalloc(s, object, size, flags); in kasan_kmalloc()
265 size_t size, gfp_t flags);
267 size_t size, gfp_t flags) in kasan_kmalloc_large() argument
270 return __kasan_kmalloc_large(ptr, size, flags); in kasan_kmalloc_large()
284 bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
287 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
288 * @page: Pointer to the page allocation.
291 * This function is intended for kernel subsystems that cache page allocations
295 * page allocations.
302 static __always_inline bool kasan_mempool_poison_pages(struct page *page, in kasan_mempool_poison_pages() argument
306 return __kasan_mempool_poison_pages(page, order, _RET_IP_); in kasan_mempool_poison_pages()
310 void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
313 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
314 * @page: Pointer to the page allocation.
317 * This function is intended for kernel subsystems that cache page allocations
320 * This function unpoisons a page allocation that was previously poisoned by
322 * the tag-based modes, this function assigns a new tag to the allocation.
324 static __always_inline void kasan_mempool_unpoison_pages(struct page *page, in kasan_mempool_unpoison_pages() argument
328 __kasan_mempool_unpoison_pages(page, order, _RET_IP_); in kasan_mempool_unpoison_pages()
333 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
344 * This function also performs checks to detect double-free and invalid-free
353 * size > KMALLOC_MAX_SIZE).
364 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
366 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
368 * @size: Size to be unpoisoned.
376 * initializing the allocation's memory. For the tag-based modes, this function
382 * size > KMALLOC_MAX_SIZE).
385 size_t size) in kasan_mempool_unpoison_object() argument
388 __kasan_mempool_unpoison_object(ptr, size, _RET_IP_); in kasan_mempool_unpoison_object()
393 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
405 static inline void kasan_unpoison_range(const void *address, size_t size) {} in kasan_unpoison_range() argument
406 static inline void kasan_poison_pages(struct page *page, unsigned int order, in kasan_poison_pages() argument
408 static inline bool kasan_unpoison_pages(struct page *page, unsigned int order, in kasan_unpoison_pages() argument
441 size_t size, gfp_t flags) in kasan_kmalloc() argument
445 static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags) in kasan_kmalloc_large() argument
454 static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order) in kasan_mempool_poison_pages() argument
458 static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {} in kasan_mempool_unpoison_pages() argument
463 static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {} in kasan_mempool_unpoison_object() argument
488 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
497 /* Tag-based KASAN modes do not use per-object metadata. */
503 /* And no cache-related metadata initialization is required. */
505 unsigned int *size, in kasan_cache_create() argument
522 * kasan_report - print a report about a bad memory access detected by KASAN
524 * @size: size of the bad access
528 bool kasan_report(const void *addr, size_t size,
564 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
565 int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
574 unsigned long size) in kasan_populate_early_vm_area_shadow() argument
577 unsigned long size) in kasan_populate_vmalloc() argument
589 void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
592 unsigned long size, in kasan_unpoison_vmalloc() argument
596 return __kasan_unpoison_vmalloc(start, size, flags); in kasan_unpoison_vmalloc()
600 void __kasan_poison_vmalloc(const void *start, unsigned long size);
602 unsigned long size) in kasan_poison_vmalloc() argument
605 __kasan_poison_vmalloc(start, size); in kasan_poison_vmalloc()
611 unsigned long size) { } in kasan_populate_early_vm_area_shadow() argument
613 unsigned long size) in kasan_populate_vmalloc() argument
624 unsigned long size, in kasan_unpoison_vmalloc() argument
629 static inline void kasan_poison_vmalloc(const void *start, unsigned long size) in kasan_poison_vmalloc() argument
642 int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
647 static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; } in kasan_alloc_module_shadow() argument