// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>
#include "internal.h"

#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

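/*
 * Booting with "early_ioremap_debug" on the kernel command line makes the
 * helper below log every early mapping and unmapping together with a stack
 * trace, which is what the leak warning at late_initcall time asks
 * reporters to provide.
 */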
#define early_ioremap_dbg(fmt, args...)			\
	do {						\
		if (unlikely(early_ioremap_debug)) {	\
			pr_warn(fmt, ##args);		\
			dump_stack();			\
		}					\
	} while (0)

static int after_paging_init __initdata;

pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
						    unsigned long size,
						    pgprot_t prot)
{
	return prot;
}

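/*
 * Called by the architecture once its normal fixmap handling is up; from
 * then on __early_ioremap() and early_iounmap() route through
 * __late_set_fixmap()/__late_clear_fixmap() instead of __early_set_fixmap().
 */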
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif

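/*
 * Per-slot bookkeeping for the FIX_BTMAPS_SLOTS temporary mappings: the
 * cookie handed back to the caller, the requested size, and the slot's
 * base virtual address.
 */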
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

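/*
 * Precompute the fixmap virtual address of every slot.  Meant to be called
 * by the architecture during early setup, before the first early_ioremap()
 * user.
 */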
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		WARN_ON_ONCE(prev_map[i]);
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
	}
}

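/*
 * Late-boot sanity check: any slot still mapped at this point was leaked
 * by an early user that never called early_iounmap()/early_memunmap().
 */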
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "please boot with early_ioremap_debug and report the dmesg.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

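/*
 * Map @size bytes starting at @phys_addr into a free FIX_BTMAP slot with
 * the given protection bits and return the corresponding virtual address
 * (including the sub-page offset).  Returns NULL when no slot is free or
 * the request does not fit within NR_FIX_BTMAPS pages.
 */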
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state >= SYSTEM_RUNNING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%pa, %08lx) not found slot\n",
		 __func__, &phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	early_ioremap_dbg("%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
			  __func__, &phys_addr, size, slot, slot_virt[slot], offset);

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

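/*
 * Undo a mapping set up by __early_ioremap().  The address and size must
 * match the original request exactly, otherwise the slot is left alone
 * and a warning is printed.
 */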
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%p, %08lx) not found slot\n",
		 __func__, addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "%s(%p, %08lx) [%d] size not consistent %08lx\n",
		 __func__, addr, size, slot, prev_size[slot]))
		return;

	early_ioremap_dbg("%s(%p, %08lx) [%d]\n", __func__, addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

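/*
 * Typical early_ioremap() usage, as an illustrative sketch only; the MMIO
 * address and length below are made up and not taken from any real
 * platform:
 *
 *	void __iomem *regs = early_ioremap(0xfed40000, 0x1000);
 *
 *	if (regs) {
 *		u32 id = readl(regs);
 *		early_iounmap(regs, 0x1000);
 *	}
 */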
/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_NORMAL);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
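
/*
 * A read-only variant is only provided on architectures that define
 * FIXMAP_PAGE_RO.
 */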
#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
						     FIXMAP_PAGE_RO);

	return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

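/*
 * Architectures that select CONFIG_ARCH_USE_MEMREMAP_PROT can ask for an
 * early mapping with raw protection bits of their choosing.
 */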
#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
		    unsigned long prot_val)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       __pgprot(prot_val));
}
#endif

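/* The most we can map in one go: one full slot's worth of pages. */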
#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

/*
 * If no empty slot is left for the temporary mapping, early_memremap()
 * fails and we return -ENOMEM.
 */
int __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		if (!p)
			return -ENOMEM;
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
	return 0;
}

#else /* CONFIG_MMU */

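/*
 * Without an MMU there is nothing to map: physical addresses are handed
 * back directly and unmapping is a no-op.
 */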
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

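/*
 * Counterpart to early_memremap*(): drops the __iomem annotation and
 * defers to early_iounmap().
 */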
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}