// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>
#include <linux/execmem.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

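/*
 * Arch copy of a user page: map both pages with kmap_local_page(),
 * copy, then issue a write barrier so other CPUs observe the fully
 * copied page before it is used.
 */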
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_local_page(to);
	vfrom = kmap_local_page(from);
	copy_page(vto, vfrom);
	/* kmap_local mappings are stack-like: release in reverse order */
	kunmap_local(vfrom);
	kunmap_local(vto);
	/* Make sure the copy is visible to other CPUs before the page is used */
	smp_wmb();
}

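/*
 * A pfn is RAM if it lies in a memblock memory region and is not
 * marked reserved (e.g. firmware or early boot allocations).
 */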
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NUMA
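/*
 * Set up zone PFN limits for the flat (non-NUMA) case: ZONE_DMA32
 * covers the low 4GiB when enabled, the rest goes to ZONE_NORMAL.
 */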
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}
#endif /* !CONFIG_NUMA */

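/* Poison the .init.* sections and release them back to the page allocator. */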
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
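/*
 * Memory hotplug entry point: hand the new range to the core
 * __add_pages() helper, which creates the memmap for it.
 */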
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
				__func__, ret);

	return ret;
}

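/* Hot-remove counterpart: tear down the memmap arch_add_memory() created. */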
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
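/* Map a physical address to its NUMA node for hotplugged memory. */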
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
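/*
 * Install a huge-page PMD mapping for the vmemmap: build a kernel
 * PMD entry for @p and mark it huge and global.
 */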
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

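/*
 * Report whether @pmd already holds a huge vmemmap mapping; if so,
 * verify it so the caller can skip populating this range.
 */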
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;

	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}

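/*
 * Populate the vmemmap with base pages on two-level page tables and
 * with huge pages otherwise. Note the altmap is not passed through.
 */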
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
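/* No-op: vmemmap mappings are left in place on memory hot-remove. */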
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif
#endif

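/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels from memblock, and return the PTE slot.
 */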
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(p4dp_get(p4d))) {
		pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init(pud);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(pudp_get(pud))) {
		pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(pmdp_get(pmd))) {
		pte_t *pte;

		pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		kernel_pte_init(pte);
	}

	return pte_offset_kernel(pmd, addr);
}

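/*
 * Bind a fixmap slot to a physical address: a non-empty pgprot maps
 * the page, an empty one clears the PTE and flushes the TLB.
 */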
void __init __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(ptep_get(ptep))) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

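/*
 * Describe the executable-memory range for the core execmem allocator:
 * module VA space, normal kernel protections, no extra alignment.
 */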
struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= MODULES_VADDR,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */