#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

#include "vmregion.h"

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vmregion	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vmregion_alloc with an appropriate
 * struct vmregion head (eg):
 *
 *  struct vmregion vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it depends on
 * the amount of RAM found at boot time).  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vmregion_alloc().
 */

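/*
 * An illustrative sketch (not part of the original code) of how a caller
 * might use this allocator; the head "example_head" and the area bounds
 * EXAMPLE_VM_START/EXAMPLE_VM_END are hypothetical names used only here:
 *
 *  static struct arm_vmregion_head example_head = {
 *	.vm_lock	= __SPIN_LOCK_UNLOCKED(example_head.vm_lock),
 *	.vm_list	= LIST_HEAD_INIT(example_head.vm_list),
 *	.vm_start	= EXAMPLE_VM_START,
 *	.vm_end		= EXAMPLE_VM_END,
 *  };
 *
 *  struct arm_vmregion *c;
 *
 *  c = arm_vmregion_alloc(&example_head, PAGE_SIZE, size, GFP_KERNEL);
 *  if (!c)
 *	return -ENOMEM;
 *  ... map the virtual range [c->vm_start, c->vm_end) ...
 *
 * and the region would later be released again with:
 *
 *  c = arm_vmregion_find_remove(&example_head, addr);
 *  ... tear the mapping down ...
 *  arm_vmregion_free(&example_head, c);
 */
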
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
		   size_t size, gfp_t gfp)
{
	unsigned long start = head->vm_start, addr = head->vm_end;
	unsigned long flags;
	struct arm_vmregion *c, *new;

	if (head->vm_end - head->vm_start < size) {
		printk(KERN_WARNING "%s: allocation too big (requested %#zx)\n",
			__func__, size);
		goto out;
	}

	new = kmalloc(sizeof(struct arm_vmregion), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&head->vm_lock, flags);

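	/*
	 * Walk the list of existing regions from the highest-addressed
	 * entry downwards, looking for the first gap that can hold "size"
	 * bytes at an address aligned down to "align" (a simple top-down
	 * first-fit search).  The list is kept sorted by ascending address.
	 */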
	addr = rounddown(addr - size, align);
	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
		if (addr >= c->vm_end)
			goto found;
		addr = rounddown(c->vm_start - size, align);
		if (addr < start)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry after the one we found (if the loop above fell
	 * through without a match, c refers back to the list head, so the
	 * new region is placed at the front of the list).
	 */
	list_add(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&head->vm_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&head->vm_lock, flags);
	kfree(new);
 out:
	return NULL;
}

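/*
 * Look up an active region by its start address.  The caller must hold
 * head->vm_lock.
 */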
static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}

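/*
 * Locked lookup of an active region by its start address.
 */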
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

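/*
 * Find an active region and mark it inactive, so that further lookups no
 * longer see it.  The region stays on the list (and its address range
 * stays reserved) until it is released with arm_vmregion_free().
 */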
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
	struct arm_vmregion *c;
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	c = __arm_vmregion_find(head, addr);
	if (c)
		c->vm_active = 0;
	spin_unlock_irqrestore(&head->vm_lock, flags);
	return c;
}

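/*
 * Unlink a region from its list and free it, making its address range
 * available for reuse.
 */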
void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
	unsigned long flags;

	spin_lock_irqsave(&head->vm_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&head->vm_lock, flags);

	kfree(c);
}