Full-text search for "memory" (results 1–25 of 151), sorted by relevance

/kvm-unit-tests/lib/
alloc_phys.h
4 * phys_alloc is a very simple allocator which allows physical memory
5 * to be partitioned into regions until all memory is allocated.
8 * a region. For more complicated memory management a single region
9 * can be allocated, but then have its memory managed by a more
21 * phys_alloc_init creates the initial free memory region of size @size
40 * phys_alloc_get_unused allocates all remaining memory from the region
41 * passed to phys_alloc_init, returning the newly allocated memory's base
43 * when no free memory is remaining, but base will equal top.
48 * Search for memory that can only be used when the MMU is on, and reinitialize
49 * the physical memory allocator using it.
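The alloc_phys.h excerpt above describes a two-call API: initialize a single free region, then drain whatever is still unused into a more capable allocator. A minimal usage sketch in C, assuming phys_alloc_init(base, size) and phys_alloc_get_unused(&base, &top) with the semantics these comments describe (the exact prototypes are an assumption, not shown in the results):

/* Sketch only: call shapes assumed from the alloc_phys.h comments above. */
#include "alloc_phys.h"

static void phys_alloc_sketch(phys_addr_t ram_base, phys_addr_t ram_size)
{
	phys_addr_t base, top;

	/* Make [ram_base, ram_base + ram_size) available for allocation. */
	phys_alloc_init(ram_base, ram_size);

	/* ... early allocations happen here ... */

	/* Take everything still free; base == top means nothing was left. */
	phys_alloc_get_unused(&base, &top);
	if (base != top) {
		/* hand [base, top) over to a more capable allocator */
	}
}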
alloc_page.h
29 * Initializes a memory area.
41 * Allocate aligned memory with the specified flags.
48 * Allocate aligned memory from any area and with default flags.
82 * Frees a memory block allocated with any of the memalign_pages* or
107 * Reserves the specified physical memory range if possible.
116 * Frees a reserved memory range that had been reserved with
118 * The memory range does not need to match a previous allocation
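The alloc_page.h lines sketch the page-level interface: aligned allocations that return NULL when memory runs out, plus a matching free for anything obtained through the memalign_pages* family. A hedged usage sketch; memalign_pages() and free_pages() are named in the excerpt, but their exact prototypes are an assumption here:

/* Sketch only: prototypes assumed from the alloc_page.h excerpt above. */
#include "alloc_page.h"

static void page_alloc_sketch(void)
{
	/* Aligned allocation from any area with default flags. */
	char *buf = memalign_pages(4096, 2 * 4096);

	if (!buf)
		return;		/* the allocator signals "out of memory" with NULL */

	buf[0] = 0x5a;		/* touch the memory */

	/* Blocks from the memalign_pages* family are freed the same way. */
	free_pages(buf);
}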
alloc_page.c
64 * Each memory area contains an array of metadata entries at the very
65 * beginning. The usable memory follows immediately afterwards.
67 * memory area, including the metadata area.
75 * Each memory area contains an array of metadata entries at the very
76 * beginning. The usable memory follows immediately afterwards.
78 * the given memory area.
90 * - the block must be within the memory area
138 * If there is not enough free memory, NULL is returned.
158 /* search all free lists for some memory */ in page_memalign_order()
169 /* out of memory */ in page_memalign_order()
[all …]
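The alloc_page.c matches describe each memory area as a metadata array placed at the very start of the area, with the usable pages following immediately after it. A purely illustrative layout with hypothetical names (not the structures actually used in alloc_page.c):

/* Hypothetical illustration of the per-area layout described above. */
struct mem_area_sketch {
	unsigned long base_pfn;		/* first usable page of the area */
	unsigned long top_pfn;		/* one past the last usable page */
	unsigned char *metadata;	/* one entry per page, stored at the very
					 * beginning of the area; usable memory
					 * starts right after this array */
};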
alloc.h
5 * related to the support of dynamic memory allocation.
9 * same interface for memory allocation at all stages, even though the
16 * The third is a very simple physical memory allocator, which the
/kvm-unit-tests/powerpc/
mmu.c
22 static void *memory; variable
32 volatile char *m = memory; in tlbie_fn()
77 memory = alloc_vpages(1); in test_tlbie()
78 ptep = install_page(NULL, p[0], memory); in test_tlbie()
80 assert(ptep == install_page(NULL, p[1], memory)); in test_tlbie()
82 assert(ptep == install_page(NULL, p[0], memory)); in test_tlbie()
84 flush_tlb_page((unsigned long)memory); in test_tlbie()
95 flush_tlb_page((unsigned long)memory); in test_tlbie()
101 flush_tlb_page((unsigned long)memory); in test_tlbie()
141 memory = alloc_vpages(1); in test_tlbie_this_cpu()
[all …]
memory-verify.c
3 * Simple memory verification test, used to exercise dirty memory migration.
41 report_prefix_push("memory"); in main()
63 report(success, "memory verification stress test"); in main()
atomics.c
35 : "=&r"(old) : "r"(1), "r"(lock) : "cr0", "memory"); in spin_lock()
42 : "+m"(*lock) : "r"(0) : "memory"); in spin_unlock()
67 : "=&r"(old) : "r"(1), "r"(var) : "cr0", "memory"); in test_lwarx_stwcx()
78 : "r"(1), "r"(var) : "cr0", "memory"); in test_lwarx_stwcx()
90 : "r"(1), "r"(2), "r"(var) : "cr0", "memory"); in test_lwarx_stwcx()
122 : "cr0", "memory"); in test_lwarx_stwcx()
172 : "cr0", "memory"); in test_lqarx_stqcx()
189 : "cr0", "memory"); in test_lqarx_stqcx()
206 : "cr0", "memory"); in test_lqarx_stqcx()
226 asm volatile ("lwarx %0,0,%1" : "=&r"(old) : "r"(var) : "memory"); in test_migrate_reserve()
[all …]
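The atomics.c matches all revolve around lwarx/stwcx. reserve-and-conditionally-store loops. A minimal sketch of that locking pattern, reusing the operand constraints visible in the excerpt; it illustrates the technique and is not a copy of the test's code:

/* Sketch of a lwarx/stwcx.-based spin lock for ppc64 (illustration only). */
static void spin_lock_sketch(unsigned int *lock)
{
	unsigned int old;

	asm volatile("1:	lwarx	%0,0,%2\n"	/* load lock word, set reservation */
		     "	cmpwi	%0,0\n"		/* lock already held? */
		     "	bne-	1b\n"
		     "	stwcx.	%1,0,%2\n"	/* store 1 if reservation still held */
		     "	bne-	1b\n"		/* lost the reservation: retry */
		     "	isync"			/* acquire barrier */
		     : "=&r"(old)
		     : "r"(1), "r"(lock)
		     : "cr0", "memory");
}

static void spin_unlock_sketch(unsigned int *lock)
{
	asm volatile("lwsync" ::: "memory");	/* release barrier */
	*lock = 0;
}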
/kvm-unit-tests/lib/arm64/asm/
barrier.h
11 #define sev() asm volatile("sev" : : : "memory")
12 #define wfe() asm volatile("wfe" : : : "memory")
13 #define wfi() asm volatile("wfi" : : : "memory")
14 #define yield() asm volatile("yield" : : : "memory")
17 #define isb() asm volatile("isb" : : : "memory")
18 #define dmb(opt) asm volatile("dmb " #opt : : : "memory")
19 #define dsb(opt) asm volatile("dsb " #opt : : : "memory")
/kvm-unit-tests/lib/arm/asm/
barrier.h
11 #define sev() asm volatile("sev" : : : "memory")
12 #define wfe() asm volatile("wfe" : : : "memory")
13 #define wfi() asm volatile("wfi" : : : "memory")
14 #define yield() asm volatile("yield" : : : "memory")
17 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
18 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
19 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
/kvm-unit-tests/lib/ppc64/asm/
barrier.h
4 #define cpu_relax() asm volatile("or 1,1,1 ; or 2,2,2" ::: "memory")
5 #define pause_short() asm volatile(".long 0x7c40003c" ::: "memory")
7 #define mb() asm volatile("sync":::"memory")
8 #define rmb() asm volatile("sync":::"memory")
9 #define wmb() asm volatile("sync":::"memory")
/kvm-unit-tests/s390x/snippets/lib/
snippet-exit.h
16 mb(); /* host may read any memory written by the guest before */ in force_exit()
18 mb(); /* allow host to modify guest memory */ in force_exit()
23 mb(); /* host may read any memory written by the guest before */ in force_exit_value()
25 mb(); /* allow host to modify guest memory */ in force_exit_value()
/kvm-unit-tests/lib/x86/asm/
barrier.h
9 #define mb() asm volatile("mfence":::"memory")
10 #define rmb() asm volatile("lfence":::"memory")
11 #define wmb() asm volatile("sfence":::"memory")
19 asm volatile("rep; nop" ::: "memory"); in rep_nop()
/kvm-unit-tests/lib/asm-generic/
barrier.h
12 #define mb() asm volatile("":::"memory")
15 #define rmb() asm volatile("":::"memory")
18 #define wmb() asm volatile("":::"memory")
32 #define cpu_relax() asm volatile ("":::"memory")
/kvm-unit-tests/lib/powerpc/asm/
processor.h
31 asm volatile("mfspr %0,%1" : "=r"(ret) : "i"(nr) : "memory"); in mfspr()
38 asm volatile("mtspr %0,%1" : : "i"(nr), "r"(val) : "memory"); in mtspr()
45 asm volatile ("mfmsr %[msr]" : [msr] "=r" (msr) :: "memory"); in mfmsr()
52 asm volatile ("mtmsrd %[msr]" :: [msr] "r" (msr) : "memory"); in mtmsr()
65 : "=r"(msr) : "i"(MSR_EE): "memory"); in local_irq_enable()
78 : "=r"(msr) : "r"(MSR_EE): "memory"); in local_irq_disable()
/kvm-unit-tests/lib/riscv/asm/
csr.h
68 : "memory"); \
77 : "memory"); \
86 : "memory"); \
94 : "memory"); \
103 : "memory"); \
111 : "memory"); \
120 : "memory"); \
barrier.h
6 __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
8 /* These barriers need to enforce ordering on both devices or memory. */
13 /* These barriers do not need to enforce ordering on devices, just memory. */
/kvm-unit-tests/x86/efi/
run
46 # Run test case with 256MiB QEMU memory. QEMU default memory size is 128MiB.
48 # memory region is ~42MiB. Although this is sufficient for many test cases to
49 # run in UEFI, some test cases, e.g. `x86/pmu.c`, require more free memory. A
50 # simple fix is to increase the QEMU default memory size to 256MiB so that
51 # UEFI's largest allocatable memory region is large enough.
/kvm-unit-tests/s390x/
memory-verify.c
3 * Simple memory verification test, used to exercise dirty memory migration.
41 report_prefix_push("memory"); in main()
63 report(success, "memory verification stress test"); in main()
selftest.c
50 report((uintptr_t)tmp & 0xf000000000000000ul, "allocated memory"); in test_malloc()
53 report(*tmp == 123456789, "wrote allocated memory"); in test_malloc()
59 "allocated memory"); in test_malloc()
62 report((*tmp2 == 123456789), "wrote allocated memory"); in test_malloc()
65 report(tmp != tmp2, "allocated memory addresses differ"); in test_malloc()
/kvm-unit-tests/common/
memory-verify.c
3 * Simple memory verification test, used to exercise dirty memory migration.
41 report_prefix_push("memory"); in main()
63 report(success, "memory verification stress test"); in main()
/kvm-unit-tests/lib/s390x/asm/
cpacf.h
273 * @dest: address of destination memory area
274 * @src: address of source memory area
294 : "cc", "memory"); in cpacf_km()
303 * @dest: address of destination memory area
304 * @src: address of source memory area
324 : "cc", "memory"); in cpacf_kmc()
334 * @src: address of source memory area
350 : "cc", "memory"); in cpacf_kimd()
357 * @src: address of source memory area
373 : "cc", "memory"); in cpacf_klmd()
[all …]
/kvm-unit-tests/lib/s390x/
sie.c
132 /* Guest memory chunks are always 1MB */ in sie_guest_create()
147 * sie_guest_alloc() - Allocate memory for a guest and map it in virtual address
162 * Start of guest memory in host virtual space needs to be aligned to in sie_guest_alloc()
163 * 2GB for some environments. It also can't be at 2GB since the memory in sie_guest_alloc()
166 * mapping. This also leaves space after end of physical memory so the in sie_guest_alloc()
167 * page immediately after physical memory is guaranteed not to be in sie_guest_alloc()
175 * Establish a new mapping of the guest memory so it can be 2GB aligned in sie_guest_alloc()
176 * without actually requiring 2GB physical memory. in sie_guest_alloc()
186 /* Frees the memory that was gathered on initialization */
/kvm-unit-tests/x86/
asyncpf.c
3 * in memory cgroup with 512M of memory and with more than 1G memory provided
18 * echo 512M > /dev/cgroup/1/memory.limit_in_bytes
23 * echo 512M > /sys/fs/cgroup/cg1/memory.max
139 /* access a lot of memory to make host swap it out */ in main()
lam.c
45 : "memory"); in do_mov()
91 is_mmio ? "MMIO" : "memory"); in test_ptr()
96 is_mmio ? "MMIO" : "memory", lam_active ? "with" : "without", in test_ptr()
107 is_mmio ? "MMIO" : "memory"); in test_ptr()
124 asm volatile(KVM_FEP "invlpg (%0)" ::"r" (ptr) : "memory"); in test_invlpg()
135 * Reuse the memory address for the descriptor since stack memory in test_invpcid()
181 /* Test for normal memory. */ in __test_lam_sup()
257 * Physical memory & MMIO have already been identical mapped in in test_lam_user()
/kvm-unit-tests/lib/x86/
processor.h
343 asm volatile (".byte 0x0f, 0x01, 0xca" : : : "memory"); in clac()
348 asm volatile (".byte 0x0f, 0x01, 0xcb" : : : "memory"); in stac()
408 asm volatile ("mov %0, %%ds" : : "rm"(val) : "memory"); in write_ds()
413 asm volatile ("mov %0, %%es" : : "rm"(val) : "memory"); in write_es()
418 asm volatile ("mov %0, %%ss" : : "rm"(val) : "memory"); in write_ss()
423 asm volatile ("mov %0, %%fs" : : "rm"(val) : "memory"); in write_fs()
428 asm volatile ("mov %0, %%gs" : : "rm"(val) : "memory"); in write_gs()
452 asm volatile ("rdmsr" : "=a"(a), "=d"(d) : "c"(index) : "memory"); in rdmsr()
459 asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory"); in wrmsr()
550 asm volatile ("mov %%cr0, %0" : "=r"(val) : : "memory"); in read_cr0()
[all …]
