Lines Matching full:section

65 	struct mem_section *section = NULL;  in sparse_index_alloc()  local
70 section = kzalloc_node(array_size, GFP_KERNEL, nid); in sparse_index_alloc()
72 section = memblock_alloc_node(array_size, SMP_CACHE_BYTES, in sparse_index_alloc()
74 if (!section) in sparse_index_alloc()
79 return section; in sparse_index_alloc()
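
These first hits are from sparse_index_alloc() in mm/sparse.c: it zero-allocates one root's worth of struct mem_section, via kzalloc_node() once the slab allocator is up and via memblock_alloc_node() during early boot. A minimal userspace sketch of that fallback pattern, with slab_available and early_alloc as hypothetical stand-ins for slab_is_available() and the memblock allocator:

#include <stdbool.h>
#include <stdlib.h>

static bool slab_available;                 /* stands in for slab_is_available() */

static void *early_alloc(size_t size)       /* stands in for memblock_alloc_node() */
{
        return calloc(1, size);             /* zeroed, like both kernel paths */
}

static void *section_array_alloc(size_t array_size)
{
        if (slab_available)
                return calloc(1, array_size);   /* kzalloc_node(array_size, GFP_KERNEL, nid) */
        return early_alloc(array_size);         /* memblock_alloc_node(array_size, ...) */
}
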
85 struct mem_section *section; in sparse_index_init() local
88 * An existing section is possible in the sub-section hotplug in sparse_index_init()
90 * the existing section. in sparse_index_init()
97 section = sparse_index_alloc(nid); in sparse_index_init()
98 if (!section) in sparse_index_init()
101 mem_section[root] = section; in sparse_index_init()
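
sparse_index_init() (lines 85-101 above) fills in one root of the two-level mem_section table on first use, and tolerates an already-populated root in the sub-section hotplug case. A simplified model of that lazy two-level layout; the array sizes are illustrative, not the kernel's NR_SECTION_ROOTS/SECTIONS_PER_ROOT:

#include <stdlib.h>

#define SECTIONS_PER_ROOT 32                /* illustrative stand-in */
#define NR_SECTION_ROOTS 64                 /* illustrative stand-in */

struct mem_section_sketch {
        unsigned long section_mem_map;
};

static struct mem_section_sketch *roots[NR_SECTION_ROOTS];

static int index_init(unsigned long section_nr)
{
        unsigned long root = section_nr / SECTIONS_PER_ROOT;

        if (roots[root])                    /* existing root, e.g. from an earlier
                                               sub-section hotplug: reuse it */
                return 0;

        roots[root] = calloc(SECTIONS_PER_ROOT, sizeof(*roots[root]));
        return roots[root] ? 0 : -1;        /* the kernel returns -ENOMEM here */
}
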
114 * mem_map, we use section_mem_map to store the section's NUMA
123 static inline int sparse_early_nid(struct mem_section *section) in sparse_early_nid() argument
125 return (section->section_mem_map >> SECTION_NID_SHIFT); in sparse_early_nid()
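
Lines 114-125 describe stashing the section's NUMA node id in section_mem_map before a real mem_map pointer is stored there; sparse_early_nid() recovers it with a shift. In miniature, with an illustrative shift value rather than the kernel's SECTION_NID_SHIFT:

#define NID_SHIFT 6                         /* illustrative stand-in for SECTION_NID_SHIFT */

static unsigned long encode_early_nid(int nid)
{
        return (unsigned long)nid << NID_SHIFT;      /* stored in section_mem_map */
}

static int decode_early_nid(unsigned long section_mem_map)
{
        return (int)(section_mem_map >> NID_SHIFT);  /* mirrors sparse_early_nid() */
}
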
329 * page being freed and making a section unremovable while in sparse_early_usemaps_alloc_pgdat_section()
331 * a pgdat can prevent a section being removed. If section A in sparse_early_usemaps_alloc_pgdat_section()
332 * contains a pgdat and section B contains the usemap, both in sparse_early_usemaps_alloc_pgdat_section()
334 * from the same section as the pgdat where possible to avoid in sparse_early_usemaps_alloc_pgdat_section()
378 pr_info("node %d must be removed before remove section %ld\n", in check_usemap_section_nr()
384 * Some platforms allow un-removable section because they will just in check_usemap_section_nr()
386 * Just notify un-removable section's number here. in check_usemap_section_nr()
388 	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n", in check_usemap_section_nr()
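
Lines 329-388 concern an ordering problem: a pgdat and a usemap each pin the section they live in, so the kernel tries to allocate usemaps from the pgdat's own section, and check_usemap_section_nr() prints the messages quoted above when that fails. A toy version of the check; pfn_to_section_nr() is simplified here, not the kernel's:

#include <stdio.h>

#define PFN_SECTION_SHIFT 15                /* illustrative pages-per-section shift */

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
        return pfn >> PFN_SECTION_SHIFT;
}

static void check_usemap_section(unsigned long usemap_pfn,
                                 unsigned long pgdat_pfn, int nid)
{
        unsigned long usemap_snr = pfn_to_section_nr(usemap_pfn);
        unsigned long pgdat_snr = pfn_to_section_nr(pgdat_pfn);

        if (usemap_snr == pgdat_snr)        /* same section: no cross-section pin */
                return;

        printf("sections %lu and %lu (node %d) pin each other\n",
               usemap_snr, pgdat_snr, nid);
}
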
452 * and we want it to be properly aligned to the section size - this is in sparse_buffer_init()
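
Line 452 is from the comment in sparse_buffer_init(): the pre-allocated memmap buffer should be aligned to the section size (notably for VMEMMAP, where memmap is mapped with large pages). A sketch of the usual align-up idiom such a buffer relies on, with an illustrative section size:

#define SECTION_SIZE (1UL << 27)            /* illustrative 128 MiB section */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned long buffer_base(unsigned long raw_addr)
{
        return ALIGN_UP(raw_addr, SECTION_SIZE);   /* first section-aligned address */
}
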
497 * Helper function that is used for generic section initialization, and
593 * for each and record the physical to section mapping.
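
Line 593 is from the comment above the early present-marking pass: walk every physical memory range and record which sections contain memory. A compact model of that walk; the kernel instead sets SECTION_MARKED_PRESENT in section_mem_map, and all sizes here are illustrative:

#define PFN_SECTION_SHIFT 15
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
#define NR_MEM_SECTIONS 1024                /* callers keep pfns in range in this sketch */

static unsigned char section_present[NR_MEM_SECTIONS];

static void memory_present_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        start_pfn &= ~(PAGES_PER_SECTION - 1);   /* align down to a section start */

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION)
                section_present[pfn >> PFN_SECTION_SHIFT] = 1;
}
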
707 "section already deactivated (%#lx + %ld)\n", in clear_subsection_map()
775 * When this function is called, the removing section is in free_map_bootmem()
777 * from page allocator. If removing section's memmap is placed in free_map_bootmem()
778 * on the same section, it must not be freed. in free_map_bootmem()
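
Lines 775-778 describe the guard in free_map_bootmem(): if the memmap pages being torn down live inside the very section that is being removed, they must not be handed back to the page allocator, since that memory is leaving the system anyway. A stand-in for that containment test, with an illustrative shift:

#define PFN_SECTION_SHIFT 15                /* illustrative */

static int memmap_in_removed_section(unsigned long memmap_pfn,
                                     unsigned long removed_section_nr)
{
        return (memmap_pfn >> PFN_SECTION_SHIFT) == removed_section_nr;
}
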
807 * 1. deactivation of a partial hot-added section (only possible in
809 * a) section was present at memory init.
810 * b) section was hot-added post memory init.
811 * 2. deactivation of a complete hot-added section.
812 * 3. deactivation of a complete section from memory init.
835 * Mark the section invalid so that valid_section() in section_deactivate()
842 * When removing an early section, the usage map is kept (as the in section_deactivate()
844 * will be re-used when re-adding the section - which is then no in section_deactivate()
845 * longer an early section. If the usage map is PageReserved, it in section_deactivate()
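
Lines 807-845 enumerate the deactivation cases and the rule for early (boot-time) sections: section_deactivate() clears the validity state so valid_section() fails, but an early section keeps its usage map so a later re-add can reuse it. In miniature, with simplified fields and an illustrative flag bit:

#include <stdbool.h>
#include <stdlib.h>

#define SECTION_VALID 0x1UL                 /* illustrative stand-in flag */

struct section_sketch {
        unsigned long flags;
        void *usage;
        bool early;                         /* present since memory init */
};

static void deactivate(struct section_sketch *s)
{
        s->flags &= ~SECTION_VALID;         /* valid_section() now reports false */

        if (!s->early) {                    /* hot-added: usage map goes with it */
                free(s->usage);
                s->usage = NULL;
        }
        /* early section: keep s->usage for reuse if the range is re-added */
}
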
895 * referenced. If we hot-add memory into such a section then we in section_activate()
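
Line 895 is from section_activate(): hot-adding memory into a partially populated early section can reuse the memmap that boot-time init already created instead of populating a new one. A hypothetical sketch of that reuse decision, with simplified types:

struct page { unsigned long flags; };       /* dummy stand-in for the kernel's */

struct early_section {
        struct page *memmap;                /* created at boot for early sections */
        int early;
};

static struct page *activate_memmap(struct early_section *s)
{
        if (s->early && s->memmap)          /* reuse what boot-time init left behind */
                return s->memmap;
        return (struct page *)0;            /* caller must populate a fresh memmap */
}
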
912 * sparse_add_section - add a memory section, or populate an existing one
913 * @nid: The node to add section on
915 * @nr_pages: number of pfns to add in the section
921 * Note that only VMEMMAP supports sub-section aligned hotplug,
927 * * -EEXIST - Section has been present.
958 /* Align memmap to section boundary in the subsection case */ in sparse_add_section()
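
Line 958 is the final step of sparse_add_section(): when only a subsection was hot-added, the memmap pointer stored for the section must still correspond to pfn 0 of the section, so it is biased back by the start pfn's offset. Roughly, assuming a virtually contiguous memmap as with VMEMMAP (which line 921 notes is the only model supporting sub-section hotplug); sizes and types here are illustrative:

#define PFN_SECTION_SHIFT 15
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

struct page { unsigned long flags; };       /* dummy stand-in */

static struct page *section_aligned_memmap(struct page *memmap,
                                           unsigned long start_pfn)
{
        /* step back over the pfns between the section start and start_pfn */
        return memmap - (start_pfn & (PAGES_PER_SECTION - 1));
}
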