Excerpts from mm/sparse.c (SPARSEMEM section bookkeeping):

// SPDX-License-Identifier: GPL-2.0
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
/* With CONFIG_SPARSEMEM_EXTREME, the per-root mem_section arrays are
 * allocated lazily: from the slab once it is up, else from memblock. */
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
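For a sense of scale: with SPARSEMEM_EXTREME the mem_section table is two-level, and each root covers SECTIONS_PER_ROOT sections. A minimal standalone sketch of that arithmetic, assuming 4 KiB pages and a 16-byte struct mem_section (a common 64-bit layout; the authoritative constants live in include/linux/mmzone.h):

#define MY_PAGE_SIZE		4096UL
#define MY_MEM_SECTION_SZ	16UL	/* assumed sizeof(struct mem_section) */
#define MY_SECTIONS_PER_ROOT	(MY_PAGE_SIZE / MY_MEM_SECTION_SZ)	/* 256 */

/* The same split that SECTION_NR_TO_ROOT() performs in the kernel. */
static unsigned long my_section_nr_to_root(unsigned long section_nr)
{
	return section_nr / MY_SECTIONS_PER_ROOT;
}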
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
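The encode side of this trick sits alongside it in the same file; memory_present(), quoted further down, uses it to stash the nid until the real mem_map is installed:

static inline unsigned long sparse_encode_early_nid(int nid)
{
	return ((unsigned long)nid << SECTION_NID_SHIFT);
}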
/* Validate the physical addressing limitations of the model */
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		/* ... WARN_ON_ONCE() and clamp both pfns to the maximum ... */
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		/* ... WARN_ON_ONCE() and clamp *end_pfn to the maximum ... */
	}
}
static void __section_mark_present(struct mem_section *ms,
		unsigned long section_nr)
{
	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start - 1);	\
	     section_nr != -1;					\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}
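To make the index math concrete, here is a standalone sketch with x86-64 numbers assumed (128 MiB sections, 2 MiB subsections, hence a 64-bit subsection_map); subsection_map_index() reduces a pfn to its subsection bit within the owning section:

#define MY_PAGES_PER_SECTION	32768UL	/* 128 MiB / 4 KiB */
#define MY_PAGES_PER_SUBSECTION	512UL	/* 2 MiB / 4 KiB */

static int my_subsection_map_index(unsigned long pfn)
{
	/* offset within the section, then which 2 MiB chunk that is */
	return (pfn & (MY_PAGES_PER_SECTION - 1)) / MY_PAGES_PER_SUBSECTION;
}
/* e.g. a pfn 0x1200 into its section lands on bit 4608 / 512 = 9 */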
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		/* clamp to the pages that remain within this section */
		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			__section_mark_present(ms, section);
		}
	}
}
/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));

	/* ... sanity checks that the encoding leaves the flag bits clear ... */
	return coded_mem_map;
}
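The inverse helper in the same file undoes the encoding: after masking off the flag bits, adding the section's first pfn back lands directly on that pfn's struct page, which is what lets the SPARSEMEM pfn_to_page() reduce to a single add:

static struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}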
static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}
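This works because the low bits of section_mem_map are reserved for state flags, so the coded pointer and the flags share one word. Indicatively (the authoritative definitions are in include/linux/mmzone.h, and the exact set of bits varies by config):

#define SECTION_MARKED_PRESENT	(1UL << 0)
#define SECTION_HAS_MEM_MAP	(1UL << 1)
#define SECTION_IS_ONLINE	(1UL << 2)
#define SECTION_IS_EARLY	(1UL << 3)
/* ... SECTION_MAP_LAST_BIT follows the last flag ... */
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT - 1))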
#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	/* ... allocate near the pgdat's own section, falling back to the node ... */
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	/* ... compute usemap_snr and pgdat_snr; bail out if they match ... */

	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}

	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable memory blocks and replace the data.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
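A worked example of that last branch (offsets assumed): if the buffer cursor sits 0x1000 bytes into the pre-allocated area and a 2 MiB allocation is requested, ptr is rounded up to the next 2 MiB boundary, the padding between the old cursor and ptr is handed back to memblock via sparse_buffer_free(), and the cursor advances to ptr + size, so alignment costs no memory.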
/* On allocation failure, sparse_init_nid() marks the remaining pnums in the
 * range as not present again, so the affected memory is simply unavailable: */
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	/* ... walks all present sections node by node via sparse_init_nid() ... */
}
/* Mark all memory sections within the pfn range as online
 * (validity checks on the section number elided here): */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* offline_mem_sections() performs the same walk and clears the flag: */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));

		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}
static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
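Reduced to plain C, the three outcomes are easy to see. A userspace sketch using a uint64_t in place of the kernel bitmap (64 subsections per section assumed; my_fill_map is a hypothetical stand-in, not a kernel API):

#include <stdint.h>
#include <errno.h>

static int my_fill_map(uint64_t *subsection_map, uint64_t request)
{
	if (!request)
		return -EINVAL;		/* empty request */
	if (*subsection_map & request)
		return -EEXIST;		/* overlaps an active subsection */
	*subsection_map |= request;	/* record the new subsections */
	return 0;
}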
static void free_map_bootmem(struct page *memmap)
{
	/* ... walk each page backing the memmap ... */
	for (i = 0; i < nr_pages; i++, page++) {
		magic = page->index;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in a logically offlined state: all of its pages are isolated
		 * from the page allocator. If the section's memmap is placed
		 * on the same section, it must not be freed, since the page
		 * allocator could hand it out again before the memory is
		 * physically removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For case 1, when the subsection_map does not become empty, the usage
 * map is kept, but the vmemmap range still needs to be freed.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree_rcu(ms->usage, rcu);
			WRITE_ONCE(ms->usage, NULL);
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
	}

	/* the memmap of early sections is always fully populated */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}
/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add the section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: alternate pfns to allocate the memmap backing store
 * @pgmap: alternate compound page geometry for devmap mappings
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug; the proper
 * alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;

	/* ... sparse_index_init(), then section_activate() (which returns
	 * the memmap), then page_init_poison() on the new struct pages ... */

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}
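For context on how this entry point is driven: the hotplug core (__add_pages() in mm/memory_hotplug.c) feeds it one section-bounded chunk at a time, which is what makes the sub-section alignment case above reachable. A condensed sketch of that loop, surrounding details elided:

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap, pgmap);
		if (err)
			break;
		cond_resched();
	}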