/*
 * Microblaze MMU emulation for qemu.
 *
 * Copyright (c) 2009 Edgar E. Iglesias
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/cputlb.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"

/*
 * Decode the 3-bit page-size field of a TLB tag entry into a size in
 * bytes.  Each step quadruples the size: 1K, 4K, 16K, 64K, 256K,
 * 1M, 4M, 16M.
 */
static unsigned int tlb_decode_size(unsigned int f)
{
    static const unsigned int sizes[] = {
        1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
        1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
    };
    assert(f < ARRAY_SIZE(sizes));
    return sizes[f];
}

/*
 * Invalidate QEMU's softmmu TLB for every target page covered by the
 * guest TLB entry at @idx.  A no-op if the entry is not valid.
 */
static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
{
    CPUState *cs = env_cpu(env);
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int tlb_size;
    uint32_t tlb_tag, end, t;

    t = mmu->rams[RAM_TAG][idx];
    if (!(t & TLB_VALID))
        return;

    tlb_tag = t & TLB_EPN_MASK;
    /* The size field sits above bit 7 of the tag word.  */
    tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
    end = tlb_tag + tlb_size;

    /* Guest pages may be larger than QEMU's; flush page by page.  */
    while (tlb_tag < end) {
        tlb_flush_page(cs, tlb_tag);
        tlb_tag += TARGET_PAGE_SIZE;
    }
}

/*
 * Called when the guest writes a new value to the PID register, before
 * MMU_R_PID is updated (see mmu_write), so MMU_R_PID still holds the
 * outgoing PID here.  Flush all valid TLB entries whose TID matches the
 * outgoing PID; entries with TID == 0 are global and are kept.
 */
static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
{
    MicroBlazeMMU *mmu = &env->mmu;
    unsigned int i;
    uint32_t t;

    /* Only the low 8 bits of the PID are architected.  */
    if (newpid & ~0xff)
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);

    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
                mmu_flush_idx(env, i);
        }
    }
}

/*
 * Look up @vaddr in the guest TLB and fill in @lu with the result.
 *
 * rw - 0 = read, 1 = write, 2 = fetch.
 *
 * Returns 1 on a hit with sufficient permission (lu->err = ERR_HIT and
 * lu->paddr/size/prot/idx valid).  Returns 0 otherwise, with lu->err
 * set to ERR_MISS (no matching entry) or ERR_PROT (entry matched but
 * the access kind is not permitted).
 */
unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu,
                           target_ulong vaddr, MMUAccessType rw, int mmu_idx)
{
    MicroBlazeMMU *mmu = &cpu->env.mmu;
    unsigned int i, hit = 0;
    unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
    uint64_t tlb_tag, tlb_rpn, mask;
    uint32_t tlb_size, t0;

    lu->err = ERR_MISS;
    for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
        uint64_t t, d;

        /* Lookup and decode. */
        t = mmu->rams[RAM_TAG][i];
        if (t & TLB_VALID) {
            tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
            /* Guest pages smaller than QEMU's page size are not modelled.  */
            if (tlb_size < TARGET_PAGE_SIZE) {
                qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
                abort();
            }

            /* Compare the address against the EPN at the entry's page size.  */
            mask = ~((uint64_t)tlb_size - 1);
            tlb_tag = t & TLB_EPN_MASK;
            if ((vaddr & mask) != (tlb_tag & mask)) {
                continue;
            }
            /* A nonzero TID must match the current PID; TID 0 is global.  */
            if (mmu->tids[i]
                && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
                continue;
            }

            /* Bring in the data part. */
            d = mmu->rams[RAM_DATA][i];
            tlb_ex = d & TLB_EX;
            tlb_wr = d & TLB_WR;

            /* Now let's see if there is a zone that overrides the protbits. */
            tlb_zsel = (d >> 4) & 0xf;
            /* ZPR holds 2-bit fields, zone 0 at bits 31:30 downwards.  */
            t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
            t0 &= 0x3;

            if (tlb_zsel > cpu->cfg.mmu_zones) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "tlb zone select out of range! %d\n", tlb_zsel);
                t0 = 1; /* Ignore. */
            }

            if (cpu->cfg.mmu == 1) {
                t0 = 1; /* Zones are disabled. */
            }

            /*
             * Zone field semantics: 0 = no access in user mode,
             * 1 = use the entry's protection bits as-is, 2 = override
             * to full access in privileged mode only, 3 = override to
             * full access in all modes.
             */
            switch (t0) {
            case 0:
                if (mmu_idx == MMU_USER_IDX)
                    continue;
                break;
            case 2:
                if (mmu_idx != MMU_USER_IDX) {
                    tlb_ex = 1;
                    tlb_wr = 1;
                }
                break;
            case 3:
                tlb_ex = 1;
                tlb_wr = 1;
                break;
            default: break;
            }

            /* Entry matched; from here on a bail-out means ERR_PROT.  */
            lu->err = ERR_PROT;
            lu->prot = PAGE_READ;
            if (tlb_wr)
                lu->prot |= PAGE_WRITE;
            else if (rw == 1)
                goto done;
            if (tlb_ex)
                lu->prot |= PAGE_EXEC;
            else if (rw == 2) {
                goto done;
            }

            tlb_rpn = d & TLB_RPN_MASK;

            lu->vaddr = tlb_tag;
            lu->paddr = tlb_rpn & cpu->cfg.addr_mask;
            lu->size = tlb_size;
            lu->err = ERR_HIT;
            lu->idx = i;
            hit = 1;
            goto done;
        }
    }
done:
    qemu_log_mask(CPU_LOG_MMU,
                  "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
                  vaddr, rw, tlb_wr, tlb_ex, hit);
    return hit;
}

/*
 * Writes/reads to the MMU's special regs end up here.
 *
 * Guest read of MMU register @rn.  @ext selects the upper 32 bits of
 * the 64-bit TLBLO ram word (extended access, TLBLO only).  Access
 * rights are gated on cpu->cfg.mmu / cpu->cfg.mmu_tlb_access; illegal
 * accesses log a guest error and read as 0.
 */
uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    unsigned int i;
    uint32_t r = 0;

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return 0;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return 0;
    }

    switch (rn) {
    /* Reads to HI/LO trig reads from the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        if (!(cpu->cfg.mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }

        /* TLBX selects the entry; rn & 1 picks the TAG or DATA ram.  */
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
        /* Reading TLBHI also loads the entry's TID into the PID reg.  */
        if (rn == MMU_R_TLBHI)
            env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
        break;
    case MMU_R_PID:
    case MMU_R_ZPR:
        if (!(cpu->cfg.mmu_tlb_access & 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return 0;
        }
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBX:
        r = env->mmu.regs[rn];
        break;
    case MMU_R_TLBSX:
        qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
    return r;
}

/*
 * Guest write of value @v to MMU register @rn.  @ext selects the upper
 * 32 bits of the 64-bit TLBLO ram word (extended access, TLBLO only).
 * Handles the side effects: TLBHI writes update the entry's TID and
 * flush its pages, ZPR/PID changes flush affected softmmu entries, and
 * TLBSX performs a TLB search setting TLBX (or its miss bit).
 */
void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
    MicroBlazeCPU *cpu = env_archcpu(env);
    uint64_t tmp64;
    unsigned int i;

    qemu_log_mask(CPU_LOG_MMU,
                  "%s rn=%d=%x old=%x\n", __func__, rn, v,
                  rn < 3 ? env->mmu.regs[rn] : env->mmu.regs[MMU_R_TLBX]);

    if (cpu->cfg.mmu < 2 || !cpu->cfg.mmu_tlb_access) {
        qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
        return;
    }
    if (ext && rn != MMU_R_TLBLO) {
        qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
        return;
    }

    switch (rn) {
    /* Writes to HI/LO trig writes to the mmu rams. */
    case MMU_R_TLBLO:
    case MMU_R_TLBHI:
        i = env->mmu.regs[MMU_R_TLBX] & 0xff;
        if (rn == MMU_R_TLBHI) {
            if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalidating index %x at pc=%x\n",
                              i, env->pc);
            /* The entry is tagged with the current PID.  */
            env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
            /* Drop any softmmu mappings for the entry being replaced.  */
            mmu_flush_idx(env, i);
        }
        /* rn & 1 picks the TAG or DATA ram; ext writes the high word.  */
        tmp64 = env->mmu.rams[rn & 1][i];
        env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
        break;
    case MMU_R_ZPR:
        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Changes to the zone protection reg flush the QEMU TLB.
           Fortunately, these are very uncommon. */
        if (v != env->mmu.regs[rn]) {
            tlb_flush(env_cpu(env));
        }
        env->mmu.regs[rn] = v;
        break;
    case MMU_R_PID:
        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        if (v != env->mmu.regs[rn]) {
            /* Flush old-PID entries first; the reg still holds the
               outgoing PID inside mmu_change_pid.  */
            mmu_change_pid(env, v);
            env->mmu.regs[rn] = v;
        }
        break;
    case MMU_R_TLBX:
        /* Bit 31 is read-only. */
        env->mmu.regs[rn] = deposit32(env->mmu.regs[rn], 0, 31, v);
        break;
    case MMU_R_TLBSX:
    {
        MicroBlazeMMULookup lu;
        int hit;

        if (cpu->cfg.mmu_tlb_access <= 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Invalid access to MMU reg %d\n", rn);
            return;
        }

        /* Search for the EPN written; on a hit TLBX gets the index,
           on a miss only the miss bit is set.  */
        hit = mmu_translate(cpu, &lu, v & TLB_EPN_MASK,
                            0, cpu_mmu_index(env_cpu(env), false));
        if (hit) {
            env->mmu.regs[MMU_R_TLBX] = lu.idx;
        } else {
            env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
        }
        break;
    }
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
        break;
    }
}

/*
 * Reset the MMU's special registers to zero.  The TLB rams/tids are
 * left untouched here.
 */
void mmu_init(MicroBlazeMMU *mmu)
{
    int i;
    for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
        mmu->regs[i] = 0;
    }
}