/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

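/*
 * Next virtual address to hand out for ioremaps done before the VM
 * subsystem is up; bumped page by page as early bolted mappings are
 * created.  Once mem_init_done is set, addresses between ioremap_bot
 * and IOREMAP_END are handed out by the vmalloc allocator instead.
 */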
unsigned long ioremap_bot = IOREMAP_BASE;


#ifdef CONFIG_PPC_MMU_NOHASH
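/*
 * Allocate a zeroed, naturally aligned chunk of page-table memory
 * below MAX_DMA_ADDRESS, from bootmem once it is up and from memblock
 * before that.
 */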
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					 __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap.  It adds an
 * entry to the ioremap page table and adds an entry to the HPT,
 * possibly bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/* Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do.  We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * Linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}
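
/*
 * Example (a sketch with hypothetical addresses, not part of this
 * file): mapping a single MMIO page at physical 0xf0000000 to a
 * page-aligned virtual address `ea', with guarded, non-cached flags
 * on top of the kernel base protection:
 *
 *	if (map_kernel_page(ea, 0xf0000000ul,
 *			    pgprot_val(PAGE_KERNEL) | _PAGE_NO_CACHE |
 *			    _PAGE_GUARDED))
 *		return -ENOMEM;
 *
 * In-kernel callers normally go through __ioremap_at() below rather
 * than calling this directly.
 */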


/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping.  This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
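
/*
 * Usage sketch for the two low level helpers above (hypothetical
 * addresses; `fixed_va' is a virtual range the caller already owns):
 * map 64K of MMIO at a fixed virtual address, later tear down one
 * page of it.
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(0xf0000000ul, fixed_va, 0x10000,
 *			  _PAGE_NO_CACHE | _PAGE_GUARDED);
 *	...
 *	__iounmap_at(fixed_va, PAGE_SIZE);
 *
 * Unlike ioremap(), no vm_struct is allocated here, so the caller is
 * responsible for managing the virtual space; this is what the PCI
 * code does for IO port space.
 */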

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to.  Once the vmalloc system is
	 * running, we use it.  Before that, we map using addresses
	 * going up from ioremap_bot.  The vmalloc system will allocate
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;
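	/*
	 * Worked example of the alignment above (assuming 4K pages):
	 * for addr = 0x80000f04 and size = 0x200, paligned becomes
	 * 0x80000000 and size becomes 0x2000, i.e. the two pages that
	 * the requested byte range actually touches.  The low bits of
	 * addr are added back to the returned pointer at the end of
	 * this function.
	 */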

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
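
/*
 * Typical driver-side use of ioremap()/iounmap() (a minimal sketch;
 * `res' is assumed to be a struct resource describing the device's
 * MMIO window and CTRL_REG a hypothetical register offset):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG);
 *	iounmap(regs);
 *
 * The _PAGE_NO_CACHE | _PAGE_GUARDED flags used above give the
 * cache-inhibited, guarded mapping that device registers require.
 */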

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format,
	 * which means that we just cleared supervisor access... oops ;-)
	 * This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
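
/*
 * Sketch of ioremap_prot() use (hypothetical address and size):
 * mapping a region cacheable rather than with the default
 * cache-inhibited flags, by passing the kernel base protection:
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_prot(0xc0000000ul, 0x100000,
 *			  pgprot_val(PAGE_KERNEL));
 *
 * Whatever the caller passes, the filtering above guarantees a
 * kernel-only mapping: _PAGE_USER and _PAGE_EXEC are always stripped
 * (and supervisor access restored on BookE).
 */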


/*
 * Unmap an IO region and remove it from vmalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);