Lines matching the full-text query +full:memory +full:-mapped (code-search results; each entry gives the source line number, the matching line, and the enclosing function)
1 // SPDX-License-Identifier: GPL-2.0
5 * These functions handle creation of KMSAN metadata for memory allocations.
7 * Copyright (C) 2018-2022 Google LLC
13 #include <linux/dma-direction.h>
42 struct kmsan_ctx *ctx = &task->kmsan_ctx; in kmsan_task_exit()
47 ctx->allow_reporting = false; in kmsan_task_exit()
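The two kmsan_task_exit() fragments above suggest the exit hook does little more than switch reporting off for the dying task's KMSAN context. A minimal sketch, assuming the same kmsan_enabled / kmsan_in_runtime() guard that the other hooks in this listing use:

void kmsan_task_exit(struct task_struct *task)
{
	struct kmsan_ctx *ctx = &task->kmsan_ctx;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* Suppress further reports on behalf of the exiting task. */
	ctx->allow_reporting = false;
}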
57 * There's a ctor or this is an RCU cache - do nothing. The memory in kmsan_slab_alloc()
60 if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU)) in kmsan_slab_alloc()
65 kmsan_internal_unpoison_memory(object, s->object_size, in kmsan_slab_alloc()
68 kmsan_internal_poison_memory(object, s->object_size, flags, in kmsan_slab_alloc()
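Read together, the kmsan_slab_alloc() fragments describe the allocation hook: caches with a constructor or SLAB_TYPESAFE_BY_RCU are left alone (their memory keeps the state it had at the last use), __GFP_ZERO allocations are unpoisoned because the allocator really did initialize them, and everything else is poisoned as uninitialized. A sketch under those assumptions; the trailing arguments of the two kmsan_internal_* calls are not visible in the listing and are guesses:

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		/* Zeroed memory is initialized by definition. */
		kmsan_internal_unpoison_memory(object, s->object_size,
					       /*checked*/ true);
	else
		/* Mark the object uninitialized until someone writes to it. */
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}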
79 if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))) in kmsan_slab_free()
82 * If there's a constructor, freed memory must remain in the same state in kmsan_slab_free()
84 * use-after-free bugs, instead we just keep it unpoisoned. in kmsan_slab_free()
86 if (s->ctor) in kmsan_slab_free()
89 kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL, in kmsan_slab_free()
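The kmsan_slab_free() fragments follow the same pattern in reverse: RCU-typesafe and SLAB_POISON caches are skipped, caches with a constructor keep their objects unpoisoned (their state must survive until the next allocation, so use-after-free cannot be tracked for them), and everything else is poisoned on free. A sketch; the poison-flag argument is an assumption:

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs, instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;

	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}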
147 * This function creates new shadow/origin pages for the physical pages mapped
148 * into the virtual memory. If those physical pages already had shadow/origin,
158 int nr, err = 0, clean = 0, mapped; in kmsan_ioremap_page_range() local
163 nr = (end - start) / PAGE_SIZE; in kmsan_ioremap_page_range()
169 err = -ENOMEM; in kmsan_ioremap_page_range()
172 mapped = __vmap_pages_range_noflush( in kmsan_ioremap_page_range()
176 if (mapped) { in kmsan_ioremap_page_range()
177 err = mapped; in kmsan_ioremap_page_range()
181 mapped = __vmap_pages_range_noflush( in kmsan_ioremap_page_range()
185 if (mapped) { in kmsan_ioremap_page_range()
189 err = mapped; in kmsan_ioremap_page_range()
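Taken together, the kmsan_ioremap_page_range() fragments outline a per-page loop: for every page in [start, end) a fresh shadow page and origin page are allocated (failure yields -ENOMEM), and each is mapped into the corresponding metadata region with __vmap_pages_range_noflush(), whose nonzero return value is propagated as the error. The sketch below fills in the pieces the listing does not show; in particular the vmalloc_shadow()/vmalloc_origin() address helpers, the alloc_page() calls, and the cleanup of partially created metadata are assumptions:

int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
	int nr, err = 0, clean = 0, mapped;
	int i;

	if (!kmsan_enabled || kmsan_in_runtime())
		return 0;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	for (i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
		shadow = alloc_page(gfp_mask);
		origin = alloc_page(gfp_mask);
		if (!shadow || !origin) {
			err = -ENOMEM;
			break;
		}
		/* Map the shadow page at the metadata address of start + off. */
		mapped = __vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot,
			&shadow, PAGE_SHIFT);
		if (mapped) {
			err = mapped;
			break;
		}
		/* Likewise for the origin page. */
		mapped = __vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot,
			&origin, PAGE_SHIFT);
		if (mapped) {
			err = mapped;
			break;
		}
	}
	/*
	 * On failure, the metadata mapped by the first `clean` iterations
	 * still has to be unmapped and freed; that unwinding is omitted here.
	 */
	kmsan_leave_runtime();
	return err;
}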
230 nr = (end - start) / PAGE_SIZE; in kmsan_iounmap_page_range()
258 * At this point we've copied the memory already. It's hard to check it in kmsan_copy_to_user()
271 /* This is a user memory access, check it. */ in kmsan_copy_to_user()
272 kmsan_internal_check_memory((void *)from, to_copy - left, to, in kmsan_copy_to_user()
275 /* Otherwise this is a kernel memory access. This happens when a in kmsan_copy_to_user()
282 to_copy - left); in kmsan_copy_to_user()
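The kmsan_copy_to_user() fragments show the two cases the hook distinguishes after the data has already been copied: if the destination really is a user address, the to_copy - left bytes that made it across are checked for uninitialized contents, and any hit is reported as an infoleak to userspace; if the destination turns out to be a kernel address (for example a compat syscall staging an argument on the kernel stack), nothing is checked and only the metadata is carried along. A sketch, with the REASON_COPY_TO_USER tag and the metadata-copy helper name assumed:

void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/* Nothing was copied, or copy_to_user() failed completely. */
	if (!to_copy || to_copy == left)
		return;

	if ((u64)to < TASK_SIZE)
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	else
		/* Kernel-to-kernel copy: just propagate the shadow/origin. */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
}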
294 kmsan_internal_check_memory(urb->transfer_buffer, in kmsan_handle_urb()
295 urb->transfer_buffer_length, in kmsan_handle_urb()
298 kmsan_internal_unpoison_memory(urb->transfer_buffer, in kmsan_handle_urb()
299 urb->transfer_buffer_length, in kmsan_handle_urb()
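For USB, the direction decides what the hook does with urb->transfer_buffer: a buffer about to be sent to the device (an OUT transfer) is checked for uninitialized bytes, while a buffer the device will fill (an IN transfer) is unpoisoned so that reading the received data does not trip KMSAN. A sketch assuming a bool is_out parameter and the REASON_SUBMIT_URB tag:

void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	if (is_out)
		/* Data travels to the device: it must be fully initialized. */
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ NULL,
					    REASON_SUBMIT_URB);
	else
		/* Data arrives from the device: treat it as initialized. */
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
	kmsan_leave_runtime();
}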
343 to_go = min(PAGE_SIZE - page_offset, (u64)size); in kmsan_handle_dma()
346 size -= to_go; in kmsan_handle_dma()
357 kmsan_handle_dma(sg_page(item), item->offset, item->length, in kmsan_handle_dma_sg()
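The DMA fragments show the splitting logic: kmsan_handle_dma() walks the buffer in page-aligned chunks (to_go = min(PAGE_SIZE - page_offset, (u64)size)) so that adjacent pages belonging to different allocations are handled independently, and kmsan_handle_dma_sg() simply applies the same treatment to every scatterlist entry. A sketch of that loop; the per-chunk helper and its direction handling (check for DMA_TO_DEVICE, unpoison for DMA_FROM_DEVICE, both for DMA_BIDIRECTIONAL) are assumptions consistent with the <linux/dma-direction.h> include near the top of the file:

/* Hypothetical per-chunk helper: check and/or unpoison depending on dir. */
static void kmsan_handle_dma_page(const void *addr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		kmsan_internal_check_memory((void *)addr, size,
					    /*user_addr*/ NULL, REASON_ANY);
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		kmsan_internal_unpoison_memory((void *)addr, size,
					       /*checked*/ false);
}

void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
		      enum dma_data_direction dir)
{
	u64 page_offset, to_go, addr;

	if (PageHighMem(page))
		return;
	addr = (u64)page_address(page) + offset;
	/* Process one page-sized chunk at a time. */
	while (size > 0) {
		page_offset = addr % PAGE_SIZE;
		to_go = min(PAGE_SIZE - page_offset, (u64)size);
		kmsan_handle_dma_page((void *)addr, to_go, dir);
		addr += to_go;
		size -= to_go;
	}
}

void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}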
361 /* Functions from kmsan-checks.h follow. */
367 /* The users may want to poison/unpoison random memory. */ in kmsan_poison_memory()
383 /* The users may want to poison/unpoison random memory. */ in kmsan_unpoison_memory()
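kmsan_poison_memory() and kmsan_unpoison_memory() are the public entry points from kmsan-checks.h, for code that has a legitimate reason to change the initialization state of arbitrary memory by hand. A typical, hypothetical use: a driver whose receive buffer is filled by the device, so the compiler never sees a store into it, yet the contents are valid once the hardware signals completion:

#include <linux/kmsan-checks.h>

/* demo_rx_complete() is a made-up callback, not part of the kernel. */
static void demo_rx_complete(void *buf, size_t len)
{
	/*
	 * The device wrote `len` bytes into `buf` behind the compiler's back,
	 * so KMSAN still believes the buffer is uninitialized. Tell it the
	 * data is now valid before the rest of the driver reads it.
	 */
	kmsan_unpoison_memory(buf, len);
}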
395 * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
399 * return value of in_task() is inconsistent - as a result, certain calls to
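The last two fragments read like the comment above the entry-code variant of the unpoisoning helper: assembly hands struct pt_regs to non-instrumented IRQ entry functions, and because in_task() is unreliable that early, a plain kmsan_unpoison_memory() call could be silently ignored there. A sketch of what such a helper could look like; the function name and body below are not shown in the listing and are assumptions:

void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	if (!kmsan_enabled)
		return;

	/*
	 * Unconditionally unpoison the register snapshot so that reading it
	 * in entry code never produces false positives, even while
	 * kmsan_in_runtime() is true.
	 */
	kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
				       /*checked*/ false);
}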