1 /*
2  *  bootmem - A boot-time physical memory allocator and configurator
3  *
4  *  Copyright (C) 1999 Ingo Molnar
5  *                1999 Kanoj Sarcar, SGI
6  *                2008 Johannes Weiner
7  *
8  * Access to this subsystem has to be serialized externally (which is true
9  * for the boot process anyway).
10  */
11 #include <linux/init.h>
12 #include <linux/pfn.h>
13 #include <linux/slab.h>
14 #include <linux/bootmem.h>
15 #include <linux/export.h>
16 #include <linux/kmemleak.h>
17 #include <linux/range.h>
18 #include <linux/memblock.h>
19 
20 #include <asm/bug.h>
21 #include <asm/io.h>
22 #include <asm/processor.h>
23 
24 #include "internal.h"
25 
26 #ifndef CONFIG_NEED_MULTIPLE_NODES
27 struct pglist_data __refdata contig_page_data;
28 EXPORT_SYMBOL(contig_page_data);
29 #endif
30 
31 unsigned long max_low_pfn;
32 unsigned long min_low_pfn;
33 unsigned long max_pfn;
34 
__alloc_memory_core_early(int nid,u64 size,u64 align,u64 goal,u64 limit)35 static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
36 					u64 goal, u64 limit)
37 {
38 	void *ptr;
39 	u64 addr;
40 
41 	if (limit > memblock.current_limit)
42 		limit = memblock.current_limit;
43 
44 	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
45 	if (!addr)
46 		return NULL;
47 
48 	ptr = phys_to_virt(addr);
49 	memset(ptr, 0, size);
50 	memblock_reserve(addr, size);
51 	/*
52 	 * The min_count is set to 0 so that bootmem allocated blocks
53 	 * are never reported as leaks.
54 	 */
55 	kmemleak_alloc(ptr, size, 0, 0);
56 	return ptr;
57 }

/**
 * free_bootmem_late - free bootmem pages directly to the page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
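
/*
 * Illustrative sketch only, not part of the original file: a late-init
 * caller that returns a no-longer-needed, page-aligned physical range
 * straight to the page allocator.  The range arguments are hypothetical.
 */
static void __init __maybe_unused example_free_range_late(unsigned long phys,
							   unsigned long bytes)
{
	/* Only whole pages contained in [phys, phys + bytes) are released. */
	free_bootmem_late(phys, bytes);
}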

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
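
/*
 * Worked example (illustrative, assuming BITS_PER_LONG == 64, so order
 * is 6): for start = 100 and end = 1000, start_aligned becomes 128 and
 * end_aligned becomes 960.  The loops then free 28 order-0 pages
 * (100..127), 13 order-6 blocks of 64 pages each (128..959) and 40
 * order-0 pages (960..999), i.e. all 900 pages in [start, end).
 */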

unsigned long __init free_low_memory_core_early(int nodeid)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	/* free reserved array temporarily so that it's treated as free area */
	memblock_free_reserved_regions();

	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
		unsigned long start_pfn = PFN_UP(start);
		unsigned long end_pfn = min_t(unsigned long,
					      PFN_DOWN(end), max_low_pfn);
		if (start_pfn < end_pfn) {
			__free_pages_memory(start_pfn, end_pfn);
			count += end_pfn - start_pfn;
		}
	}

	/* put the reserved regions array back so it is accounted as reserved again */
	memblock_reserve_reserved_regions();
	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * Pass MAX_NUMNODES rather than NODE_DATA(0)->node_id: if node 0
	 * has no RAM installed, the low memory may live on another node.
	 * Using MAX_NUMNODES makes sure every range in early_node_map[]
	 * is considered, not just the node 0 ones.
	 */
	return free_low_memory_core_early(MAX_NUMNODES);
}
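
/*
 * Illustrative sketch only, not part of the original file: an arch's
 * mem_init() typically releases all boot memory to the buddy allocator
 * and accounts the freed pages, roughly along these lines.
 */
static void __init __maybe_unused example_mem_init_release(void)
{
	/* Hand every free range known to memblock over to the buddy lists. */
	totalram_pages += free_all_bootmem();
}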

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_free(addr, size);
}
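
/*
 * Illustrative sketch only, not part of the original file: a boot-time
 * user that reserved a scratch region earlier can hand it back once it
 * is done with it; free_bootmem_node() works the same way when the range
 * is known to sit on one node.  The arguments are hypothetical.
 */
static void __init __maybe_unused example_return_scratch(unsigned long scratch_phys,
							  unsigned long scratch_size)
{
	/* Partial pages at either end stay reserved, as documented above. */
	free_bootmem(scratch_phys, scratch_size);
}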

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
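
/*
 * Illustrative sketch only, not part of the original file: an optional
 * early allocation where the caller can cope with failure.  The buffer
 * and its size are hypothetical.
 */
static void __init __maybe_unused example_optional_buffer(void)
{
	void *buf;

	/* goal 0 means no preferred address; any node may satisfy this. */
	buf = __alloc_bootmem_nopanic(PAGE_SIZE, PAGE_SIZE, 0);
	if (!buf)
		pr_warn("optional early buffer not available\n");
}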

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
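
/*
 * Illustrative sketch only, not part of the original file: the common
 * "must succeed" pattern.  The alloc_bootmem() style wrappers in
 * <linux/bootmem.h> boil down to calls of roughly this shape; the table
 * size chosen here is hypothetical.
 */
static void __init __maybe_unused example_mandatory_table(void)
{
	void *table;

	/* ___alloc_bootmem() panics on failure, so no NULL check is needed. */
	table = __alloc_bootmem(16 * PAGE_SIZE, SMP_CACHE_BYTES, 0);
	(void)table;
}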

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, -1ULL);
}
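
/*
 * Illustrative sketch only, not part of the original file: a per-node
 * structure that should live on its own node when possible; the
 * documented any-node fallback is handled inside __alloc_bootmem_node().
 */
static void * __init __maybe_unused example_pernode_alloc(pg_data_t *pgdat,
							   unsigned long bytes)
{
	return __alloc_bootmem_node(pgdat, bytes, SMP_CACHE_BYTES, 0);
}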

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
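
/*
 * Illustrative sketch only, not part of the original file: sparsemem
 * setup code can ask for per-section metadata backed by memory from the
 * same section; callers must handle a NULL return.
 */
static void * __init __maybe_unused example_section_alloc(unsigned long section_nr,
							   unsigned long bytes)
{
	return alloc_bootmem_section(bytes, section_nr);
}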
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
						 goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
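
/*
 * Illustrative sketch only, not part of the original file: a buffer that
 * must sit below ARCH_LOW_ADDRESS_LIMIT (4GB unless the architecture
 * overrides it), e.g. for a consumer limited to 32-bit physical addresses.
 */
static void __init __maybe_unused example_low_buffer(void)
{
	void *low = __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);

	(void)low;	/* would be handed to the address-limited consumer */
}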

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
}