Lines matching "memory-mapped" in arch/sh/mm/ioremap.c

 * (C) Copyright 2005 - 2010  Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.

 * On 32-bit SH, we traditionally have the whole physical address space mapped

In __ioremap_29bit():

	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access to P1 addresses is done through P2.
	 * In the P3 case, or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */

	/* P4 addresses above the store queues are always mapped. */
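
The fast path above is pure address arithmetic: in 29-bit mode, physical
addresses below P3 are permanently aliased into the fixed P1 (cached) and P2
(uncached) segments, so "remapping" is just OR-ing in a segment base. A
minimal user-space sketch of that arithmetic follows; the helper names mirror
the kernel's P1SEGADDR()/P2SEGADDR(), but these standalone definitions and the
sample address are illustrative, not the kernel's:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative SH-4 segment bases; 29 bits of physical address space. */
	#define P1SEG      0x80000000UL  /* cached, no TLB involvement   */
	#define P2SEG      0xa0000000UL  /* uncached, no TLB involvement */
	#define PHYS_MASK  0x1fffffffUL  /* low 29 bits carry the physical address */

	/* Alias a physical address into a fixed segment, like P1SEGADDR()/P2SEGADDR(). */
	static uint32_t p1segaddr(uint32_t phys) { return (phys & PHYS_MASK) | P1SEG; }
	static uint32_t p2segaddr(uint32_t phys) { return (phys & PHYS_MASK) | P2SEG; }

	int main(void)
	{
		uint32_t phys = 0x0c000000;  /* hypothetical device address */

		printf("cached   alias: 0x%08x\n", p1segaddr(phys));  /* 0x8c000000 */
		printf("uncached alias: 0x%08x\n", p2segaddr(phys));  /* 0xac000000 */
		return 0;
	}

This is why the 29-bit path can succeed without touching the TLB or any page
tables, and why uncached access to a P1 address only means switching to its
P2 alias.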

 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.

In __ioremap_caller():

	void __iomem *mapped;

	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;
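
The body above is a cheapest-first dispatch: trapped I/O emulation, then the
arithmetic-only 29-bit aliases, and only afterwards the mechanisms that
consume resources (PMB entries, page tables). A small sketch of that shape,
with hypothetical backends; none of the helper names below are the kernel's:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical backend: returns a cookie on success, NULL to pass. */
	typedef void *(*remap_fn)(unsigned long phys, unsigned long size);

	static void *remap_trapped(unsigned long phys, unsigned long size)
	{
		(void)phys; (void)size;  /* no trapped I/O region in this sketch */
		return NULL;
	}

	static void *remap_29bit(unsigned long phys, unsigned long size)
	{
		/* Uncached fixed alias for 29-bit physical addresses. */
		return size && phys < 0x20000000UL ? (void *)(phys | 0xa0000000UL) : NULL;
	}

	/* Try each strategy in order, cheapest first. */
	static void *remap(unsigned long phys, unsigned long size)
	{
		remap_fn backends[] = { remap_trapped, remap_29bit };
		for (size_t i = 0; i < sizeof(backends) / sizeof(backends[0]); i++) {
			void *cookie = backends[i](phys, size);
			if (cookie)
				return cookie;
		}
		return NULL;  /* the real code falls back to page tables here */
	}

	int main(void)
	{
		printf("%p\n", remap(0x0c000000UL, 0x1000UL));  /* 0xac000000 */
		return 0;
	}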

Continuing in __ioremap_caller():

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))
		return mapped;
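
The `mapped && !IS_ERR(mapped)` test deserves a note: pmb_remap_caller() can
return NULL, a usable pointer, or an encoded error. The kernel's
ERR_PTR()/IS_ERR() convention (from include/linux/err.h) packs a small
negative errno into the pointer value itself, so one return value carries
both outcomes. A minimal standalone sketch of that convention, with the
helpers redefined here purely for illustration:

	#include <stdio.h>
	#include <errno.h>

	/*
	 * Values in the last page of the address space encode a negative
	 * errno rather than a real pointer.
	 */
	#define MAX_ERRNO 4095UL

	static void *err_ptr(long error)     { return (void *)error; }
	static long ptr_err(const void *ptr) { return (long)ptr; }
	static int is_err(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *mapped = err_ptr(-EINVAL);  /* what a failing remap might return */

		if (!mapped)
			printf("no mapping attempted\n");
		else if (is_err(mapped))
			printf("remap failed: errno %ld\n", -ptr_err(mapped));
		else
			printf("mapped at %p\n", mapped);
		return 0;
	}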

Later in __ioremap_caller():

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;
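
This tail is the part the earlier NOTE comment promised: keep the sub-page
offset, align the physical base down to a page boundary, round the size up to
whole pages, and hand the caller the mapped base plus the saved offset. A
standalone sketch of the same arithmetic, assuming a hypothetical 4 KiB page
size and made-up addresses:

	#include <stdio.h>

	#define PAGE_SIZE     4096UL
	#define PAGE_MASK     (~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		/* Hypothetical request: 0x300 bytes at a non-page-aligned address. */
		unsigned long phys_addr = 0x10000a34UL;
		unsigned long size      = 0x300UL;
		unsigned long last_addr = phys_addr + size - 1;
		unsigned long offset;

		offset     = phys_addr & ~PAGE_MASK;  /* 0xa34, re-added at the end    */
		phys_addr &= PAGE_MASK;               /* 0x10000000, page-aligned base */
		size       = PAGE_ALIGN(last_addr + 1) - phys_addr;  /* 0x1000        */

		printf("map 0x%lx bytes at 0x%lx, hand back mapping base + 0x%lx\n",
		       size, phys_addr, offset);
		return 0;
	}

The caller never sees any of this: it asked for an unaligned region and gets
back a cookie that already points at the right byte inside the mapping.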

 * Simple checks for non-translatable mappings.

In iomapping_nontranslatable():

	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
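
For illustration, a sketch of how such a predicate can be written. The
segment bases, the PXSEG() stand-in, and the use of P4SEG in place of the
kernel's P3_ADDR_MAX bound are all assumptions made for this sketch:

	#include <stdio.h>

	/* Illustrative 29-bit SH segment bases; assumptions for this sketch. */
	#define P1SEG  0x80000000UL  /* cached,   fixed 1:1 mapping  */
	#define P2SEG  0xa0000000UL  /* uncached, fixed 1:1 mapping  */
	#define P3SEG  0xc0000000UL  /* translated through the TLB   */
	#define P4SEG  0xe0000000UL  /* control space, fixed mapping */

	/* The top three address bits select the segment, like the kernel's PXSEG(). */
	static unsigned long pxseg(unsigned long addr)
	{
		return addr & 0xe0000000UL;
	}

	/*
	 * Nontranslatable means "never went through page tables": the fixed
	 * P1/P2 aliases below P3, and everything from P4 up (P4SEG stands in
	 * for the kernel's P3_ADDR_MAX bound here).
	 */
	static int nontranslatable(unsigned long addr)
	{
		return pxseg(addr) < P3SEG || addr >= P4SEG;
	}

	int main(void)
	{
		printf("P1 alias 0x8c000000: %d\n", nontranslatable(0x8c000000UL)); /* 1 */
		printf("P3 vmap  0xc0080000: %d\n", nontranslatable(0xc0080000UL)); /* 0 */
		return 0;
	}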