xref: /kvm-unit-tests/lib/vmalloc.c (revision 3874bb46f1258062b8e7c4f8fe1d6c84f38a436f)
1efd8e5aaSPaolo Bonzini /*
2efd8e5aaSPaolo Bonzini  * Copyright (C) 2012, 2017, Red Hat Inc.
3efd8e5aaSPaolo Bonzini  *
4efd8e5aaSPaolo Bonzini  * This allocator provides contiguous physical addresses with page
5efd8e5aaSPaolo Bonzini  * granularity.
6efd8e5aaSPaolo Bonzini  */
7efd8e5aaSPaolo Bonzini 
8efd8e5aaSPaolo Bonzini #include "libcflat.h"
9efd8e5aaSPaolo Bonzini #include "asm/spinlock.h"
10efd8e5aaSPaolo Bonzini #include "asm/page.h"
11937e2392SPaolo Bonzini #include "asm/io.h"
12dcda215bSPaolo Bonzini #include "alloc.h"
13937e2392SPaolo Bonzini #include "alloc_phys.h"
14937e2392SPaolo Bonzini #include "alloc_page.h"
15937e2392SPaolo Bonzini #include "vmalloc.h"
16efd8e5aaSPaolo Bonzini 
/* Serializes updates to vfree_top and the alloc_ops switch in setup_vm(). */
static struct spinlock lock;
/* Top of the virtual allocation area; alloc_vpages() grows downwards from here. */
static void *vfree_top = 0;
/* Page table root returned by setup_mmu(); used to install/look up mappings. */
static void *page_root;
20efd8e5aaSPaolo Bonzini 
21efd8e5aaSPaolo Bonzini void *alloc_vpages(ulong nr)
22efd8e5aaSPaolo Bonzini {
234aabe7c0SClaudio Imbrenda 	uintptr_t ptr;
244aabe7c0SClaudio Imbrenda 
25efd8e5aaSPaolo Bonzini 	spin_lock(&lock);
264aabe7c0SClaudio Imbrenda 	ptr = (uintptr_t)vfree_top;
274aabe7c0SClaudio Imbrenda 	ptr -= PAGE_SIZE * nr;
284aabe7c0SClaudio Imbrenda 	vfree_top = (void *)ptr;
29efd8e5aaSPaolo Bonzini 	spin_unlock(&lock);
304aabe7c0SClaudio Imbrenda 
314aabe7c0SClaudio Imbrenda 	/* Cannot return vfree_top here, we are outside the lock! */
324aabe7c0SClaudio Imbrenda 	return (void *)ptr;
33efd8e5aaSPaolo Bonzini }
34efd8e5aaSPaolo Bonzini 
/* Convenience wrapper: reserve exactly one page of virtual address space. */
void *alloc_vpage(void)
{
	return alloc_vpages(1);
}
39efd8e5aaSPaolo Bonzini 
40dcda215bSPaolo Bonzini void *vmap(phys_addr_t phys, size_t size)
41dcda215bSPaolo Bonzini {
42dcda215bSPaolo Bonzini 	void *mem, *p;
43*3874bb46SClaudio Imbrenda 	size_t pages;
44dcda215bSPaolo Bonzini 
456ea7326aSClaudio Imbrenda 	size = PAGE_ALIGN(size);
46dcda215bSPaolo Bonzini 	pages = size / PAGE_SIZE;
47dcda215bSPaolo Bonzini 	mem = p = alloc_vpages(pages);
48dcda215bSPaolo Bonzini 
49dcda215bSPaolo Bonzini 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
50dcda215bSPaolo Bonzini 	while (pages--) {
51dcda215bSPaolo Bonzini 		install_page(page_root, phys, p);
52dcda215bSPaolo Bonzini 		phys += PAGE_SIZE;
53dcda215bSPaolo Bonzini 		p += PAGE_SIZE;
54dcda215bSPaolo Bonzini 	}
55dcda215bSPaolo Bonzini 	return mem;
56dcda215bSPaolo Bonzini }
57dcda215bSPaolo Bonzini 
58dcda215bSPaolo Bonzini static void *vm_memalign(size_t alignment, size_t size)
59dcda215bSPaolo Bonzini {
60dcda215bSPaolo Bonzini 	void *mem, *p;
61*3874bb46SClaudio Imbrenda 	size_t pages;
62dcda215bSPaolo Bonzini 
63dcda215bSPaolo Bonzini 	assert(alignment <= PAGE_SIZE);
646ea7326aSClaudio Imbrenda 	size = PAGE_ALIGN(size);
65dcda215bSPaolo Bonzini 	pages = size / PAGE_SIZE;
66dcda215bSPaolo Bonzini 	mem = p = alloc_vpages(pages);
67dcda215bSPaolo Bonzini 	while (pages--) {
68dcda215bSPaolo Bonzini 		phys_addr_t pa = virt_to_phys(alloc_page());
69dcda215bSPaolo Bonzini 		install_page(page_root, pa, p);
70dcda215bSPaolo Bonzini 		p += PAGE_SIZE;
71dcda215bSPaolo Bonzini 	}
72dcda215bSPaolo Bonzini 	return mem;
73dcda215bSPaolo Bonzini }
74dcda215bSPaolo Bonzini 
75dcda215bSPaolo Bonzini static void vm_free(void *mem, size_t size)
76dcda215bSPaolo Bonzini {
77dcda215bSPaolo Bonzini 	while (size) {
78dcda215bSPaolo Bonzini 		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
79dcda215bSPaolo Bonzini 		mem += PAGE_SIZE;
80dcda215bSPaolo Bonzini 		size -= PAGE_SIZE;
81dcda215bSPaolo Bonzini 	}
82dcda215bSPaolo Bonzini }
83dcda215bSPaolo Bonzini 
/* alloc_ops implementation backed by per-page virtual mappings. */
static struct alloc_ops vmalloc_ops = {
	.memalign = vm_memalign,
	.free = vm_free,
	.align_min = PAGE_SIZE,
};
89dcda215bSPaolo Bonzini 
/*
 * Hook called by setup_vm() between the two phys_alloc_get_unused()
 * passes.  The default (weak) implementation does nothing; an
 * architecture may override it — presumably to make additional (high)
 * memory available before the second pass.  TODO(review): confirm the
 * override semantics against the arch implementations.
 */
void __attribute__((__weak__)) find_highmem(void)
{
}
9348a0145fSPaolo Bonzini 
9417b9f93eSClaudio Imbrenda void init_alloc_vpage(void *top)
9517b9f93eSClaudio Imbrenda {
9617b9f93eSClaudio Imbrenda 	spin_lock(&lock);
9717b9f93eSClaudio Imbrenda 	assert(alloc_ops != &vmalloc_ops);
9817b9f93eSClaudio Imbrenda 	vfree_top = top;
9917b9f93eSClaudio Imbrenda 	spin_unlock(&lock);
10017b9f93eSClaudio Imbrenda }
10117b9f93eSClaudio Imbrenda 
102937e2392SPaolo Bonzini void setup_vm()
103937e2392SPaolo Bonzini {
104937e2392SPaolo Bonzini 	phys_addr_t base, top;
105dcda215bSPaolo Bonzini 
106dcda215bSPaolo Bonzini 	if (alloc_ops == &vmalloc_ops)
107dcda215bSPaolo Bonzini 		return;
108dcda215bSPaolo Bonzini 
109937e2392SPaolo Bonzini 	phys_alloc_get_unused(&base, &top);
110bf62a925SAndrew Jones 	assert(base != top || page_alloc_initialized());
11148a0145fSPaolo Bonzini 	/*
11248a0145fSPaolo Bonzini 	 * Give low memory immediately to the page allocator,
11348a0145fSPaolo Bonzini 	 * so that it can be used to allocate page tables.
11448a0145fSPaolo Bonzini 	 */
115bf62a925SAndrew Jones 	if (!page_alloc_initialized()) {
1166ea7326aSClaudio Imbrenda 		base = PAGE_ALIGN(base);
117937e2392SPaolo Bonzini 		top = top & -PAGE_SIZE;
118937e2392SPaolo Bonzini 		free_pages(phys_to_virt(base), top - base);
119bf62a925SAndrew Jones 	}
12048a0145fSPaolo Bonzini 
12148a0145fSPaolo Bonzini 	find_highmem();
12248a0145fSPaolo Bonzini 	phys_alloc_get_unused(&base, &top);
123937e2392SPaolo Bonzini 	page_root = setup_mmu(top);
12448a0145fSPaolo Bonzini 	if (base != top) {
1256ea7326aSClaudio Imbrenda 		base = PAGE_ALIGN(base);
12648a0145fSPaolo Bonzini 		top = top & -PAGE_SIZE;
12748a0145fSPaolo Bonzini 		free_pages(phys_to_virt(base), top - base);
12848a0145fSPaolo Bonzini 	}
12948a0145fSPaolo Bonzini 
13017b9f93eSClaudio Imbrenda 	spin_lock(&lock);
13117b9f93eSClaudio Imbrenda 	assert(alloc_ops != &vmalloc_ops);
132dcda215bSPaolo Bonzini 	alloc_ops = &vmalloc_ops;
13317b9f93eSClaudio Imbrenda 	spin_unlock(&lock);
134937e2392SPaolo Bonzini }
135