// SPDX-License-Identifier: GPL-2.0
/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */


#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

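/*
 * One record per debugfs-triggered allocation, kept on the per-area
 * mem_head list so the pages can later be released via the "free" file.
 */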
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

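/*
 * Generic read handler: report the unsigned long that @data points to.
 * Used for the "count", "order_per_bit" and "base_pfn" files.
 */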
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

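/* Report the number of pages currently allocated from this CMA area. */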
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;

	spin_lock_irq(&cma->lock);
	*val = cma->count - cma->available_count;
	spin_unlock_irq(&cma->lock);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

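/*
 * Report, in pages, the largest contiguous free chunk found in any of the
 * area's ranges by scanning their allocation bitmaps.
 */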
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	struct cma_memrange *cmr;
	unsigned long maxchunk = 0;
	unsigned long start, end;
	unsigned long bitmap_maxno;
	int r;

	spin_lock_irq(&cma->lock);
	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		bitmap_maxno = cma_bitmap_maxno(cma, cmr);
		end = 0;
		for (;;) {
			start = find_next_zero_bit(cmr->bitmap,
						   bitmap_maxno, end);
			if (start >= bitmap_maxno)
				break;
			end = find_next_bit(cmr->bitmap, bitmap_maxno,
					    start);
			maxchunk = max(end - start, maxchunk);
		}
	}
	spin_unlock_irq(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

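/* Record a debugfs allocation on the per-area list. */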
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

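/* Pop the most recently recorded allocation, or return NULL if none is left. */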
static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

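/*
 * Release up to @count pages from previously recorded debugfs allocations.
 * A record is only split when order_per_bit == 0; otherwise a partial block
 * cannot be released and the walk stops.
 */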
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

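/* "free" file write handler: release the requested number of pages. */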
static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

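/*
 * Allocate @count pages from the area and record them so they can be
 * released later through the "free" file.
 */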
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0, false);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

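/* "alloc" file write handler: allocate the requested number of pages. */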
static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

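/*
 * Create the debugfs directory for one CMA area: alloc/free triggers,
 * read-only counters, and a "ranges/<n>" subdirectory per memory range
 * exposing its base PFN and allocation bitmap.
 */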
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
	struct dentry *tmp, *dir, *rangedir;
	int r;
	char rdirname[12];
	struct cma_memrange *cmr;

	tmp = debugfs_create_dir(cma->name, root_dentry);

	debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
	debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
	debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", 0444, tmp,
			    &cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);

	rangedir = debugfs_create_dir("ranges", tmp);
	for (r = 0; r < cma->nranges; r++) {
		cmr = &cma->ranges[r];
		snprintf(rdirname, sizeof(rdirname), "%d", r);
		dir = debugfs_create_dir(rdirname, rangedir);
		debugfs_create_file("base_pfn", 0444, dir,
				    &cmr->base_pfn, &cma_debugfs_fops);
		cmr->dfs_bitmap.array = (u32 *)cmr->bitmap;
		cmr->dfs_bitmap.n_elements =
			DIV_ROUND_UP(cma_bitmap_maxno(cma, cmr),
				     BITS_PER_BYTE * sizeof(u32));
		debugfs_create_u32_array("bitmap", 0444, dir,
					 &cmr->dfs_bitmap);
	}

	/*
	 * Backward compatible symlinks to range 0 for base_pfn and bitmap.
	 */
	debugfs_create_symlink("base_pfn", tmp, "ranges/0/base_pfn");
	debugfs_create_symlink("bitmap", tmp, "ranges/0/bitmap");
}

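/* Create <debugfs>/cma and populate one subdirectory per registered area. */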
static int __init cma_debugfs_init(void)
{
	struct dentry *cma_debugfs_root;
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], cma_debugfs_root);

	return 0;
}
late_initcall(cma_debugfs_init);