xref: /kvm-unit-tests/lib/vmalloc.c (revision 48a0145f4ebc1d6d1c045f13fe2b9d5f8600120f)
1 /*
2  * Copyright (C) 2012, 2017, Red Hat Inc.
3  *
4  * This allocator provides contiguous physical addresses with page
5  * granularity.
6  */
7 
8 #include "libcflat.h"
9 #include "asm/spinlock.h"
10 #include "asm/page.h"
11 #include "asm/io.h"
12 #include "alloc.h"
13 #include "alloc_phys.h"
14 #include "alloc_page.h"
15 #include "vmalloc.h"
16 
/* Protects vfree_top against concurrent allocators. */
static struct spinlock lock;
/* Top of the virtual allocation area; allocations grow it downwards. */
static void *vfree_top = 0;
/* Page table root returned by setup_mmu(); used for every mapping below. */
static void *page_root;
20 
21 void *alloc_vpages(ulong nr)
22 {
23 	spin_lock(&lock);
24 	vfree_top -= PAGE_SIZE * nr;
25 	spin_unlock(&lock);
26 	return vfree_top;
27 }
28 
/* Convenience wrapper: reserve a single virtual page. */
void *alloc_vpage(void)
{
	return alloc_vpages(1);
}
33 
34 void init_alloc_vpage(void *top)
35 {
36 	vfree_top = top;
37 }
38 
39 void *vmap(phys_addr_t phys, size_t size)
40 {
41 	void *mem, *p;
42 	unsigned pages;
43 
44 	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
45 	pages = size / PAGE_SIZE;
46 	mem = p = alloc_vpages(pages);
47 
48 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
49 	while (pages--) {
50 		install_page(page_root, phys, p);
51 		phys += PAGE_SIZE;
52 		p += PAGE_SIZE;
53 	}
54 	return mem;
55 }
56 
57 static void *vm_memalign(size_t alignment, size_t size)
58 {
59 	void *mem, *p;
60 	unsigned pages;
61 
62 	assert(alignment <= PAGE_SIZE);
63 	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
64 	pages = size / PAGE_SIZE;
65 	mem = p = alloc_vpages(pages);
66 	while (pages--) {
67 		phys_addr_t pa = virt_to_phys(alloc_page());
68 		install_page(page_root, pa, p);
69 		p += PAGE_SIZE;
70 	}
71 	return mem;
72 }
73 
74 static void vm_free(void *mem, size_t size)
75 {
76 	while (size) {
77 		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
78 		mem += PAGE_SIZE;
79 		size -= PAGE_SIZE;
80 	}
81 }
82 
/*
 * alloc_ops implementation backed by virtual memory: allocations are
 * served with page-granular mappings, so the minimum alignment this
 * allocator can guarantee is PAGE_SIZE.
 */
static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
	.align_min = PAGE_SIZE,
};
88 
/*
 * Weak no-op stub; called by setup_vm() before the unused physical
 * region is re-queried.  Presumably architectures override this to make
 * additional (high) memory visible to the physical allocator — TODO
 * confirm against arch-specific implementations.
 */
void __attribute__((__weak__)) find_highmem(void)
{
}
92 
93 void setup_vm()
94 {
95 	phys_addr_t base, top;
96 
97 	if (alloc_ops == &vmalloc_ops)
98 		return;
99 
100 	phys_alloc_get_unused(&base, &top);
101 	assert(base != top || page_alloc_initialized());
102 	/*
103 	 * Give low memory immediately to the page allocator,
104 	 * so that it can be used to allocate page tables.
105 	 */
106 	if (!page_alloc_initialized()) {
107 		base = (base + PAGE_SIZE - 1) & -PAGE_SIZE;
108 		top = top & -PAGE_SIZE;
109 		free_pages(phys_to_virt(base), top - base);
110 	}
111 
112 	find_highmem();
113 	phys_alloc_get_unused(&base, &top);
114 	page_root = setup_mmu(top);
115 	if (base != top) {
116 		base = (base + PAGE_SIZE - 1) & -PAGE_SIZE;
117 		top = top & -PAGE_SIZE;
118 		free_pages(phys_to_virt(base), top - base);
119 	}
120 
121 	alloc_ops = &vmalloc_ops;
122 }
123