// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/cma.h>
#include <linux/compiler.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/setup.h>

#include <linux/hugetlb.h>
#include "internal.h"
#include "hugetlb_cma.h"


static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_only;
static unsigned long hugetlb_cma_size __initdata;

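/*
 * Return a folio to the hugetlb CMA area of its node; warn if it was not
 * allocated from that area.
 */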
void hugetlb_cma_free_folio(struct folio *folio)
{
	int nid = folio_nid(folio);

	WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
}


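/*
 * Allocate a huge page order folio from the hugetlb CMA area of @nid,
 * falling back to the other nodes in @nodemask unless __GFP_THISNODE is
 * set. The returned folio is marked as CMA-backed.
 */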
struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
				      int nid, nodemask_t *nodemask)
{
	int node;
	int order = huge_page_order(h);
	struct folio *folio = NULL;

	if (hugetlb_cma[nid])
		folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

	if (!folio && !(gfp_mask & __GFP_THISNODE)) {
		for_each_node_mask(node, *nodemask) {
			if (node == nid || !hugetlb_cma[node])
				continue;

			folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
			if (folio)
				break;
		}
	}

	if (folio)
		folio_set_hugetlb_cma(folio);

	return folio;
}

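/*
 * Reserve one huge page of hstate @h from a hugetlb CMA area at boot time.
 * Try the CMA area of *@nid first; if that fails and @node_exact is false,
 * fall back to the other online nodes and update *@nid on success.
 */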
struct huge_bootmem_page * __init
hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
{
	struct cma *cma;
	struct huge_bootmem_page *m;
	int node = *nid;

	cma = hugetlb_cma[*nid];
	m = cma_reserve_early(cma, huge_page_size(h));
	if (!m) {
		if (node_exact)
			return NULL;

		for_each_online_node(node) {
			cma = hugetlb_cma[node];
			if (!cma || node == *nid)
				continue;
			m = cma_reserve_early(cma, huge_page_size(h));
			if (m) {
				*nid = node;
				break;
			}
		}
	}

	if (m) {
		m->flags = HUGE_BOOTMEM_CMA;
		m->cma = cma;
	}

	return m;
}


static bool cma_reserve_called __initdata;

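/*
 * Parse the "hugetlb_cma=" parameter: either a single size or a
 * comma-separated list of <node>:<size> pairs. Per-node sizes are recorded
 * in hugetlb_cma_size_in_node[] and summed into hugetlb_cma_size.
 */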
static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if we have one; otherwise stop
			 * parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

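/*
 * Parse the "hugetlb_cma_only=" boolean parameter; its value is reported
 * via hugetlb_cma_exclusive_alloc() and, for gigantic pages, via
 * hugetlb_early_cma().
 */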
static int __init cmdline_parse_hugetlb_cma_only(char *p)
{
	return kstrtobool(p, &hugetlb_cma_only);
}

early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);

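/*
 * Reserve the CMA areas requested via "hugetlb_cma=". @order is the
 * gigantic page order. Per-node requests are used as given; a single
 * overall size is split evenly across the online nodes. Areas smaller than
 * one gigantic page are dropped with a warning.
 */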
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which cannot be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	VM_WARN_ON(order <= MAX_PAGE_ORDER);
	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes were specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
		 * let's allocate 1 GB on the first three nodes and ignore the last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on the smallest size that
		 * may be returned to the CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
					HUGETLB_PAGE_ORDER, name,
					&hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible.  Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}

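/*
 * Warn if "hugetlb_cma=" was specified but hugetlb_cma_reserve() was never
 * called, i.e. the architecture does not wire up hugetlb CMA reservations.
 */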
void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

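/*
 * Report whether "hugetlb_cma_only=" was set, i.e. whether allocations
 * should come exclusively from the hugetlb CMA areas.
 */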
bool hugetlb_cma_exclusive_alloc(void)
{
	return hugetlb_cma_only;
}

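/*
 * Total hugetlb CMA size requested on the command line; zero if none was
 * requested or no CMA regions could be reserved.
 */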
unsigned long __init hugetlb_cma_total_size(void)
{
	return hugetlb_cma_size;
}

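/*
 * "hugetlb_cma_only=" has no effect without "hugetlb_cma="; clear it if no
 * CMA size was requested.
 */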
void __init hugetlb_cma_validate_params(void)
{
	if (!hugetlb_cma_size)
		hugetlb_cma_only = false;
}

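/*
 * Report whether gigantic pages for @h must be reserved from CMA at early
 * boot: only when "hugetlb_cma_only=" is set and the architecture has no
 * boot-time gigantic page allocator of its own (arch_has_huge_bootmem_alloc()).
 */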
bool __init hugetlb_early_cma(struct hstate *h)
{
	if (arch_has_huge_bootmem_alloc())
		return false;

	return hstate_is_gigantic(h) && hugetlb_cma_only;
}