/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */
8
9 #include "qemu/osdep.h"
10 #include "system/tcg.h"
11 #include "cpu.h"
12 #include "accel/tcg/cpu-mmu-index.h"
13 #include "exec/target_page.h"
14 #include "internals.h"
15 #include "cpu-csr.h"
16 #include "tcg/tcg_loongarch.h"
17
/*
 * Look up the base bit position and width (in bits) of the page-table
 * index for the given walk level, as configured in CSR.PWCL/CSR.PWCH.
 * Levels 1-2 live in PWCL, levels 3-4 in PWCH; any other value
 * (notably level 0, used by ldpte) yields the last-level PTE geometry.
 */
void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                        uint64_t *dir_width, target_ulong level)
{
    uint64_t pwcl = env->CSR_PWCL;
    uint64_t pwch = env->CSR_PWCH;

    if (level == 1) {
        *dir_base = FIELD_EX64(pwcl, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(pwcl, CSR_PWCL, DIR1_WIDTH);
    } else if (level == 2) {
        *dir_base = FIELD_EX64(pwcl, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(pwcl, CSR_PWCL, DIR2_WIDTH);
    } else if (level == 3) {
        *dir_base = FIELD_EX64(pwch, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(pwch, CSR_PWCH, DIR3_WIDTH);
    } else if (level == 4) {
        *dir_base = FIELD_EX64(pwch, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(pwch, CSR_PWCH, DIR4_WIDTH);
    } else {
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(pwcl, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(pwcl, CSR_PWCL, PTWIDTH);
    }
}
45
/*
 * Software page-table walk used for debugger accesses.
 *
 * Walks the in-memory page tables rooted at CSR.PGDH/CSR.PGDL (selected
 * by bit 63 of @address) down to a PTE, honouring huge-page entries at
 * intermediate levels.  On success stores the TARGET_PAGE_SIZE-aligned
 * physical address in @physical, the access rights in @prot, and
 * returns 0; returns TLBRET_NOMATCH if no valid mapping exists.
 *
 * Fix: the index/offset masks previously used plain int shifts
 * ((1 << dir_width) - 1).  dir_base for huge-page levels can exceed 31,
 * and a shift count >= 31 on a signed int is undefined behaviour and
 * truncates the mask; use 64-bit shifts instead.
 */
static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical,
                                       int *prot, target_ulong address)
{
    CPUState *cs = env_cpu(env);
    target_ulong index, phys;
    uint64_t dir_base, dir_width;
    uint64_t base;
    int level;

    /* Bit 63 selects the high or low page-table root. */
    if ((address >> 63) & 0x1) {
        base = env->CSR_PGDH;
    } else {
        base = env->CSR_PGDL;
    }
    base &= TARGET_PHYS_MASK;

    for (level = 4; level > 0; level--) {
        get_dir_base_width(env, &dir_base, &dir_width, level);

        if (dir_width == 0) {
            /* This directory level is not configured in PWCL/PWCH. */
            continue;
        }

        /* get next level page directory */
        index = (address >> dir_base) & ((1ULL << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        if (FIELD_EX64(base, TLBENTRY, HUGE)) {
            /* base is a huge pte */
            break;
        }
    }

    /* pte */
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Huge Page. base is pte; clear the huge-page marker bits and
         * fold HGLOBAL into the normal G bit so the entry can be
         * interpreted in normal-PTE format below.
         */
        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }
    } else {
        /* Normal Page. base points to pte; level 0 gives PTE geometry. */
        get_dir_base_width(env, &dir_base, &dir_width, 0);
        index = (address >> dir_base) & ((1ULL << dir_width) - 1);
        phys = base | index << 3;
        base = ldq_phys(cs->as, phys);
    }

    /* TODO: check plv and other bits? */

    /* base is pte, in normal pte format */
    if (!FIELD_EX64(base, TLBENTRY, V)) {
        return TLBRET_NOMATCH;
    }

    /* Writable only when the dirty bit is already set. */
    if (!FIELD_EX64(base, TLBENTRY, D)) {
        *prot = PAGE_READ;
    } else {
        *prot = PAGE_READ | PAGE_WRITE;
    }

    /*
     * Get TARGET_PAGE_SIZE aligned physical address: add the in-page
     * offset covered by this mapping level (dir_base bits).
     */
    base += (address & TARGET_PHYS_MASK) & ((1ULL << dir_base) - 1);
    /* mask RPLV, NX, NR bits */
    base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NX, 0);
    base = FIELD_DP64(base, TLBENTRY_64, NR, 0);
    /* mask other attribute bits */
    *physical = base & TARGET_PAGE_MASK;

    return 0;
}
120
/*
 * Translate a mapped (non-direct) virtual address.  First consult the
 * TLB (TCG only); on a TLB miss, fall back to a software page-table
 * walk for debugger accesses, otherwise report no match.
 */
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx,
                                 int is_debug)
{
    if (tcg_enabled()) {
        int ret = loongarch_get_addr_from_tlb(env, physical, prot, address,
                                              access_type, mmu_idx);
        if (ret != TLBRET_NOMATCH) {
            return ret;
        }
    }

    if (!is_debug) {
        return TLBRET_NOMATCH;
    }

    /*
     * For debugger memory access, we want to do the map when there is a
     * legal mapping, even if the mapping is not yet in TLB. return 0 if
     * there is a valid map, else none zero.
     */
    return loongarch_page_table_walker(env, physical, prot, address);
}
147
dmw_va2pa(CPULoongArchState * env,target_ulong va,target_ulong dmw)148 static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
149 target_ulong dmw)
150 {
151 if (is_la64(env)) {
152 return va & TARGET_VIRT_MASK;
153 } else {
154 uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
155 return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \
156 (pseg << R_CSR_DMW_32_VSEG_SHIFT);
157 }
158 }
159
/*
 * Top-level virtual -> physical translation.
 *
 * Resolution order: direct address translation (CRMD.DA=1, CRMD.PG=0),
 * then the four direct map windows (CSR.DMW0-3), then the mapped path
 * via TLB / page-table walk.  Returns TLBRET_MATCH / 0 on success with
 * @physical and @prot filled in, or a TLBRET_* error code.
 *
 * Fix: the DA/PG check used `da & !pg`, mixing a bitwise AND with a
 * logical NOT.  It only worked because both are single-bit field
 * extracts; use the logical `da && !pg` that the condition means.
 */
int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx, int is_debug)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);

    /* Check PG and DA: direct address translation mode */
    if (da && !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    /* Build the PLV bit mask matching the DMW PLV0/PLV3 enable bits. */
    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        /* Window hits when the current PLV is enabled and VSEG matches. */
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }

    /* Check valid extension: upper bits must be a sign extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx, is_debug);
}
208
loongarch_cpu_get_phys_page_debug(CPUState * cs,vaddr addr)209 hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
210 {
211 CPULoongArchState *env = cpu_env(cs);
212 hwaddr phys_addr;
213 int prot;
214
215 if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
216 cpu_mmu_index(cs, false), 1) != 0) {
217 return -1;
218 }
219 return phys_addr;
220 }
221