xref: /kvm-unit-tests/lib/vmalloc.c (revision 17b9f93e15e2a77cc868288072ac4d39c33fff3b)
1 /*
2  * Copyright (C) 2012, 2017, Red Hat Inc.
3  *
4  * This allocator provides contiguous physical addresses with page
5  * granularity.
6  */
7 
8 #include "libcflat.h"
9 #include "asm/spinlock.h"
10 #include "asm/page.h"
11 #include "asm/io.h"
12 #include "alloc.h"
13 #include "alloc_phys.h"
14 #include "alloc_page.h"
15 #include "vmalloc.h"
16 
/* Serializes updates to vfree_top and the installation of vmalloc_ops. */
static struct spinlock lock;
/* Top of the downward-growing virtual free area; set by init_alloc_vpage(). */
static void *vfree_top = 0;
/* Page table root returned by setup_mmu(); used for all install_page() calls. */
static void *page_root;
20 
21 void *alloc_vpages(ulong nr)
22 {
23 	uintptr_t ptr;
24 
25 	spin_lock(&lock);
26 	ptr = (uintptr_t)vfree_top;
27 	ptr -= PAGE_SIZE * nr;
28 	vfree_top = (void *)ptr;
29 	spin_unlock(&lock);
30 
31 	/* Cannot return vfree_top here, we are outside the lock! */
32 	return (void *)ptr;
33 }
34 
/* Convenience wrapper: reserve a single page of virtual address space. */
void *alloc_vpage(void)
{
	void *page = alloc_vpages(1);

	return page;
}
39 
40 void *vmap(phys_addr_t phys, size_t size)
41 {
42 	void *mem, *p;
43 	unsigned pages;
44 
45 	size = PAGE_ALIGN(size);
46 	pages = size / PAGE_SIZE;
47 	mem = p = alloc_vpages(pages);
48 
49 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
50 	while (pages--) {
51 		install_page(page_root, phys, p);
52 		phys += PAGE_SIZE;
53 		p += PAGE_SIZE;
54 	}
55 	return mem;
56 }
57 
58 static void *vm_memalign(size_t alignment, size_t size)
59 {
60 	void *mem, *p;
61 	unsigned pages;
62 
63 	assert(alignment <= PAGE_SIZE);
64 	size = PAGE_ALIGN(size);
65 	pages = size / PAGE_SIZE;
66 	mem = p = alloc_vpages(pages);
67 	while (pages--) {
68 		phys_addr_t pa = virt_to_phys(alloc_page());
69 		install_page(page_root, pa, p);
70 		p += PAGE_SIZE;
71 	}
72 	return mem;
73 }
74 
75 static void vm_free(void *mem, size_t size)
76 {
77 	while (size) {
78 		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
79 		mem += PAGE_SIZE;
80 		size -= PAGE_SIZE;
81 	}
82 }
83 
/*
 * Allocator hooks installed by setup_vm(): every allocation is backed by
 * whole pages, so the minimum alignment is PAGE_SIZE.
 */
static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
	.align_min = PAGE_SIZE,
};
89 
/*
 * Weak no-op default, called from setup_vm() before the remaining
 * physical memory is handed to the page allocator.  Presumably arch
 * code overrides this to discover memory above the initial region —
 * see the per-arch implementations.
 */
void __attribute__((__weak__)) find_highmem(void)
{
}
93 
/*
 * Set the initial top of the virtual free area that alloc_vpages()
 * carves allocations from.  Must be called before the vmalloc ops are
 * installed, hence the assert.
 */
void init_alloc_vpage(void *top)
{
	spin_lock(&lock);
	assert(alloc_ops != &vmalloc_ops);
	vfree_top = top;
	spin_unlock(&lock);
}
101 
102 void setup_vm()
103 {
104 	phys_addr_t base, top;
105 
106 	if (alloc_ops == &vmalloc_ops)
107 		return;
108 
109 	phys_alloc_get_unused(&base, &top);
110 	assert(base != top || page_alloc_initialized());
111 	/*
112 	 * Give low memory immediately to the page allocator,
113 	 * so that it can be used to allocate page tables.
114 	 */
115 	if (!page_alloc_initialized()) {
116 		base = PAGE_ALIGN(base);
117 		top = top & -PAGE_SIZE;
118 		free_pages(phys_to_virt(base), top - base);
119 	}
120 
121 	find_highmem();
122 	phys_alloc_get_unused(&base, &top);
123 	page_root = setup_mmu(top);
124 	if (base != top) {
125 		base = PAGE_ALIGN(base);
126 		top = top & -PAGE_SIZE;
127 		free_pages(phys_to_virt(base), top - base);
128 	}
129 
130 	spin_lock(&lock);
131 	assert(alloc_ops != &vmalloc_ops);
132 	alloc_ops = &vmalloc_ops;
133 	spin_unlock(&lock);
134 }
135