/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/cputlb.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Convert the PMP permissions to match the truth table in the Smepmp spec.
 */
static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}
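
/*
 * As an illustration, with the bit assignments from pmp.h (PMP_READ ==
 * 1 << 0, PMP_WRITE == 1 << 1, PMP_EXEC == 1 << 2, PMP_LOCK == 1 << 7),
 * a locked read/execute entry cfg == PMP_LOCK | PMP_READ | PMP_EXEC == 0x85
 * maps to (0x80 >> 4) | (0x01 << 2) | (0x04 >> 2) == 0b1101 == 13, i.e.
 * the L|R|W|X nibble used to index the Smepmp truth table.
 */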

/*
 * Accessor to extract the address-matching type ('A' field) from a cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
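
/*
 * For example, cfg == 0x18 (binary 11000) has bits [4:3] == 0b11, which
 * selects PMP_AMATCH_NAPOT, while 0b00 in those bits means PMP_AMATCH_OFF.
 */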

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    return 0;
}

/*
 * Check whether a PMP entry is read-only, i.e. it has the LOCK flag set
 * and mseccfg.RLB is unset.
 */
static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
{
    return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
}

/*
 * Check whether `val` is an invalid Smepmp config value
 */
static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
{
    /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
    if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
        return 0;
    }

    /*
     * Adding a rule with executable privileges that either is M-mode-only
     * or a locked Shared-Region is not possible
     */
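    /*
     * The operation index is the L|R|W|X nibble computed by
     * pmp_get_smepmp_operation() above: e.g. 9 == 0b1001 is a locked
     * execute-only rule and 13 == 0b1101 a locked read/execute rule,
     * both M-mode-only executable regions when mseccfg.MML is set.
     */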
    switch (pmp_get_smepmp_operation(val)) {
    case 0:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
    case 12:
    case 14:
    case 15:
        return 0;
    case 9:
    case 10:
    case 11:
    case 13:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/*
 * Return the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART.
 * Performs the bounds check and honours the relevant lock bit.
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
            /* no change */
            return false;
        }

        if (pmp_is_readonly(env, pmp_index)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpcfg write - read only\n");
        } else if (pmp_is_invalid_smepmp_cfg(env, val)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpcfg write - invalid\n");
        } else {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

void pmp_unlock_entries(CPURISCVState *env)
{
    uint32_t pmp_num = pmp_get_num_rules(env);
    int i;

    for (i = 0; i < pmp_num; i++) {
        env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
    }
}

static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}
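
/*
 * Worked example of the decoding above: pmpaddr == 0x1003 (trailing bit
 * pattern "011", i.e. a 32-byte range):
 *   a  == (0x1003 << 2) | 0x3 == 0x400f
 *   sa == 0x400f & 0x4010     == 0x4000
 *   ea == 0x400f | 0x4010     == 0x401f
 * giving the 32-byte region [0x4000, 0x401f].
 */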

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    hwaddr sa = 0u;
    hwaddr ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}
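
/*
 * For example, with pmpaddr[i - 1] == 0x1000, pmpaddr[i] == 0x2000 and
 * entry i configured as TOR, the rule covers bytes [0x4000, 0x7fff]:
 * both CSR values are shifted left by two because pmpaddr registers hold
 * the address divided by 4.
 */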

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has the required RWX privs when no PMP entry matches.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}


/*
 * Public Interface
 */

/*
 * Check if the address has the required RWX privs to complete the desired
 * operation.
 * Return true if a PMP rule matches or the default policy allows the access.
 * Return false otherwise.
 */
bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    int pmp_size = 0;
    hwaddr s = 0;
    hwaddr e = 0;

    /* Shortcut if there are no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
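            /*
             * A concrete illustration, assuming 4 KiB target pages
             * (TARGET_PAGE_MASK == ~0xfff): for addr == 0x80000123,
             * -(addr | ~0xfff) == 0x1000 - 0x123 == 0xedd, the number of
             * bytes from addr up to the end of the page.
             */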
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = 2 << riscv_cpu_mxl(env);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If the mseccfg.MML bit is not set, do the PMP priv check.
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If the mseccfg.MML bit is set, do the enhanced PMP priv
                 * check.
                 */
                const uint8_t smepmp_operation =
                    pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);

                if (mode == PRV_M) {
                    switch (smepmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (smepmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If a matching address range was found, the protection bits
             * defined by the PMP rule must be used. We shouldn't fall back
             * to the default privileges.
             */
            return (privs & *allowed_privs) == privs;
        }
    }

    /* No rule matched */
    return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
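    /* 4 cfg bytes per pmpcfg CSR on RV32 (mxl == 1), 8 on RV64 (mxl == 2) */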
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If the PMP permission of any address has changed, flush the TLB. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        if (env->pmp_state.pmp[addr_index].addr_reg == val) {
            /* no change */
            return;
        }

        /*
         * In TOR mode, pmpaddr[i] is the base of region i + 1, so we also
         * need to check the lock bit of the next PMP entry (if there is
         * a next one).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg+1 read only\n");
                return;
            }
        }

        if (!pmp_is_readonly(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule_addr(env, addr_index);
            if (is_next_cfg_tor) {
                pmp_update_rule_addr(env, addr_index + 1);
            }
            tlb_flush(env_cpu(env));
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - read only\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to the mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;
    uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
    /* Update the PMM field only if the value is valid per Zjpm v1.0 */
    if (riscv_cpu_cfg(env)->ext_smmpm &&
        riscv_cpu_mxl(env) == MXL_RV64 &&
        get_field(val, MSECCFG_PMM) != PMM_FIELD_RESERVED) {
        mask |= MSECCFG_PMM;
    }

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and if any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->ext_smepmp) {
        /* Sticky bits */
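        /*
         * OR-ing the previous value of these fields back in means a write
         * can set mseccfg.MML/MMWP but never clear them again, as Smepmp
         * requires of its sticky bits.
         */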
        val |= (env->mseccfg & mask);
        if ((val ^ env->mseccfg) & mask) {
            tlb_flush(env_cpu(env));
        }
    } else {
        mask |= MSECCFG_RLB;
        val &= ~(mask);
    }

    /* M-mode forward CFI is enabled only if the CFI extension is implemented */
    if (env_archcpu(env)->cfg.ext_zicfilp) {
        val |= (val & MSECCFG_MLPE);
    }

    env->mseccfg = val;
}

/*
 * Handle a read from the mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, and
 * this may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However, we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
    hwaddr pmp_sa;
    hwaddr pmp_ea;
    hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will
     * not be split into regions with different permissions by PMP, so we set
     * the size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (all or part of) the TLB page
         * really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of the region may be different from other
         * regions of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP, so we set the
     * size to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}