/* Source: kvm-unit-tests lib/vmalloc.c (revision dcda215bee0e3d21eefba1b3658ba384952b5576) */
1 /*
2  * Copyright (C) 2012, 2017, Red Hat Inc.
3  *
4  * This allocator provides contiguous physical addresses with page
5  * granularity.
6  */
7 
8 #include "libcflat.h"
9 #include "asm/spinlock.h"
10 #include "asm/page.h"
11 #include "asm/io.h"
12 #include "alloc.h"
13 #include "alloc_phys.h"
14 #include "alloc_page.h"
15 #include "vmalloc.h"
16 
/* Protects vfree_top against concurrent virtual-address carve-outs. */
static struct spinlock lock;
/* Top of the virtual allocation area; alloc_vpages() grows downward from here. */
static void *vfree_top = 0;
/* Page-table root returned by setup_mmu(); passed to install_page() and friends. */
static void *page_root;
20 
21 void *alloc_vpages(ulong nr)
22 {
23 	spin_lock(&lock);
24 	vfree_top -= PAGE_SIZE * nr;
25 	spin_unlock(&lock);
26 	return vfree_top;
27 }
28 
/* Convenience wrapper: reserve exactly one page of virtual address space. */
void *alloc_vpage(void)
{
	return alloc_vpages(1);
}
33 
34 void init_alloc_vpage(void *top)
35 {
36 	vfree_top = top;
37 }
38 
39 void *vmap(phys_addr_t phys, size_t size)
40 {
41 	void *mem, *p;
42 	unsigned pages;
43 
44 	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
45 	pages = size / PAGE_SIZE;
46 	mem = p = alloc_vpages(pages);
47 
48 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
49 	while (pages--) {
50 		install_page(page_root, phys, p);
51 		phys += PAGE_SIZE;
52 		p += PAGE_SIZE;
53 	}
54 	return mem;
55 }
56 
57 static void *vm_memalign(size_t alignment, size_t size)
58 {
59 	void *mem, *p;
60 	unsigned pages;
61 
62 	assert(alignment <= PAGE_SIZE);
63 	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
64 	pages = size / PAGE_SIZE;
65 	mem = p = alloc_vpages(pages);
66 	while (pages--) {
67 		phys_addr_t pa = virt_to_phys(alloc_page());
68 		install_page(page_root, pa, p);
69 		p += PAGE_SIZE;
70 	}
71 	return mem;
72 }
73 
74 static void vm_free(void *mem, size_t size)
75 {
76 	while (size) {
77 		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
78 		mem += PAGE_SIZE;
79 		size -= PAGE_SIZE;
80 	}
81 }
82 
/*
 * Hooks the vmalloc backend into the generic allocator: page-granular,
 * virtually contiguous allocations backed by the page allocator.
 */
static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
	.align_min = PAGE_SIZE,
};
88 
89 void setup_vm()
90 {
91 	phys_addr_t base, top;
92 
93 	if (alloc_ops == &vmalloc_ops)
94 		return;
95 
96 	phys_alloc_get_unused(&base, &top);
97 	base = (base + PAGE_SIZE - 1) & -PAGE_SIZE;
98 	top = top & -PAGE_SIZE;
99 	free_pages(phys_to_virt(base), top - base);
100 	page_root = setup_mmu(top);
101 	alloc_ops = &vmalloc_ops;
102 }
103