/*
 * s390x MMU
 *
 * Copyright (c) 2017 Red Hat Inc
 *
 * Authors:
 *  David Hildenbrand <david@redhat.com>
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Library General Public License version 2.
 */

#include <libcflat.h>
#include <asm/pgtable.h>
#include <asm/arch_def.h>
#include <asm/barrier.h>
#include <vmalloc.h>
#include "mmu.h"

static pgd_t *table_root;

void configure_dat(int enable)
{
	uint64_t mask;

	if (enable)
		mask = extract_psw_mask() | PSW_MASK_DAT;
	else
		mask = extract_psw_mask() & ~PSW_MASK_DAT;

	load_psw_mask(mask);
}

static void mmu_enable(pgd_t *pgtable)
{
	/* the lowcore resides at absolute address 0, so NULL is valid here */
	struct lowcore *lc = NULL;
	const uint64_t asce = __pa(pgtable) | ASCE_DT_REGION1 |
			      REGION_TABLE_LENGTH;

	/* set primary asce */
	lctlg(1, asce);
	assert(stctg(1) == asce);

	/* enable dat (primary == 0 set as default) */
	configure_dat(1);

	/* we can now also use DAT unconditionally in our PGM handler */
	lc->pgm_new_psw.mask |= PSW_MASK_DAT;
}

static pteval_t *get_pte(pgd_t *pgtable, uintptr_t vaddr)
{
	pgd_t *pgd = pgd_offset(pgtable, vaddr);
	p4d_t *p4d = p4d_alloc(pgd, vaddr);
	pud_t *pud = pud_alloc(p4d, vaddr);
	pmd_t *pmd = pmd_alloc(pud, vaddr);
	pte_t *pte = pte_alloc(pmd, vaddr);

	return &pte_val(*pte);
}

phys_addr_t virt_to_pte_phys(pgd_t *pgtable, void *vaddr)
{
	return (*get_pte(pgtable, (uintptr_t)vaddr) & PAGE_MASK) +
	       ((unsigned long)vaddr & ~PAGE_MASK);
}

static pteval_t *set_pte(pgd_t *pgtable, pteval_t val, void *vaddr)
{
	pteval_t *p_pte = get_pte(pgtable, (uintptr_t)vaddr);

	/* first flush the old entry (if we're replacing anything) */
	if (!(*p_pte & PAGE_ENTRY_I))
		ipte((uintptr_t)vaddr, p_pte);

	*p_pte = val;
	return p_pte;
}

pteval_t *install_page(pgd_t *pgtable, phys_addr_t phys, void *vaddr)
{
	return set_pte(pgtable, __pa(phys), vaddr);
}

void protect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte | prot;

	set_pte(table_root, n_pte, vaddr);
}

void unprotect_page(void *vaddr, unsigned long prot)
{
	pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
	pteval_t n_pte = *p_pte & ~prot;

	set_pte(table_root, n_pte, vaddr);
}

void protect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		protect_page((void *)curr, prot);
}

void unprotect_range(void *start, unsigned long len, unsigned long prot)
{
	uintptr_t curr = (uintptr_t)start & PAGE_MASK;

	len &= PAGE_MASK;
	for (; len; len -= PAGE_SIZE, curr += PAGE_SIZE)
		unprotect_page((void *)curr, prot);
}

static void setup_identity(pgd_t *pgtable, phys_addr_t start_addr,
			   phys_addr_t end_addr)
{
	phys_addr_t cur;

	start_addr &= PAGE_MASK;
	for (cur = start_addr; true; cur += PAGE_SIZE) {
		/* ascending range: stop once we reach end_addr */
		if (start_addr < end_addr && cur >= end_addr)
			break;
		/* wrapping range (end_addr == 0): stop once cur has wrapped */
		if (start_addr > end_addr && cur <= end_addr)
			break;
		install_page(pgtable, cur, __va(cur));
	}
}
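
/*
 * Illustrative sketch only, not used by this library: how install_page()
 * and virt_to_pte_phys() fit together. The helper name is hypothetical,
 * and it assumes page-aligned phys/vaddr, as all callers in this file
 * pass.
 */
static inline void example_map_and_check(pgd_t *pgtable, phys_addr_t phys,
					 void *vaddr)
{
	/* map vaddr to phys; set_pte() invalidates any old valid entry */
	install_page(pgtable, phys, vaddr);

	/* translating vaddr back through the tables yields phys again */
	assert(virt_to_pte_phys(pgtable, vaddr) == phys);
}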

void *setup_mmu(phys_addr_t phys_end)
{
	pgd_t *page_root;

	/* allocate a region-1 table */
	page_root = pgd_alloc_one();

	/* map all physical memory 1:1 */
	setup_identity(page_root, 0, phys_end);

	/* generate 128MB of invalid addresses at the end (for testing PGM) */
	init_alloc_vpage((void *) -(1UL << 27));
	setup_identity(page_root, -(1UL << 27), 0);

	/* finally enable DAT with the new table */
	mmu_enable(page_root);
	table_root = page_root;
	return page_root;
}
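
/*
 * A minimal usage sketch, not part of this file: tests normally reach
 * setup_mmu() through setup_vm() from <vmalloc.h> rather than calling it
 * directly. After that, per-page protection can be toggled; PAGE_ENTRY_P
 * (the DAT protection bit), expect_pgm_int() and check_pgm_int_code()
 * are assumed to come from the usual kvm-unit-tests s390x headers:
 *
 *	protect_page(page, PAGE_ENTRY_P);	// set the protection bit
 *	expect_pgm_int();
 *	*(volatile char *)page = 0x42;		// write -> protection PGM
 *	check_pgm_int_code(PGM_INT_CODE_PROTECTION);
 *	unprotect_page(page, PAGE_ENTRY_P);	// writable again
 */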