xref: /qemu/target/arm/tcg/mte_helper.c (revision c15294c1e36a7dd9b25bd54d98178e80f4b64bc1)
1da54941fSRichard Henderson /*
2da54941fSRichard Henderson  * ARM v8.5-MemTag Operations
3da54941fSRichard Henderson  *
4da54941fSRichard Henderson  * Copyright (c) 2020 Linaro, Ltd.
5da54941fSRichard Henderson  *
6da54941fSRichard Henderson  * This library is free software; you can redistribute it and/or
7da54941fSRichard Henderson  * modify it under the terms of the GNU Lesser General Public
8da54941fSRichard Henderson  * License as published by the Free Software Foundation; either
9da54941fSRichard Henderson  * version 2.1 of the License, or (at your option) any later version.
10da54941fSRichard Henderson  *
11da54941fSRichard Henderson  * This library is distributed in the hope that it will be useful,
12da54941fSRichard Henderson  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13da54941fSRichard Henderson  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14da54941fSRichard Henderson  * Lesser General Public License for more details.
15da54941fSRichard Henderson  *
16da54941fSRichard Henderson  * You should have received a copy of the GNU Lesser General Public
17da54941fSRichard Henderson  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18da54941fSRichard Henderson  */
19da54941fSRichard Henderson 
20da54941fSRichard Henderson #include "qemu/osdep.h"
21da54941fSRichard Henderson #include "cpu.h"
22da54941fSRichard Henderson #include "internals.h"
23da54941fSRichard Henderson #include "exec/exec-all.h"
24da54941fSRichard Henderson #include "exec/cpu_ldst.h"
25da54941fSRichard Henderson #include "exec/helper-proto.h"
26da54941fSRichard Henderson 
27da54941fSRichard Henderson 
/*
 * Pick an allocation tag that is not in @exclude, per the pseudocode
 * ChooseNonExcludedTag: if every tag is excluded, the result is 0.
 * With @offset == 0, advance @tag forward until it is not excluded;
 * otherwise advance to the next non-excluded tag @offset times.
 */
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    int steps;

    if (exclude == 0xffff) {
        /* All 16 tags excluded: the architected result is tag 0. */
        return 0;
    }

    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
        return tag;
    }

    for (steps = offset; steps > 0; steps--) {
        /* Step at least once, then skip past any excluded tags. */
        do {
            tag = (tag + 1) & 15;
        } while (exclude & (1 << tag));
    }
    return tag;
}
46da54941fSRichard Henderson 
47*c15294c1SRichard Henderson /**
48*c15294c1SRichard Henderson  * allocation_tag_mem:
49*c15294c1SRichard Henderson  * @env: the cpu environment
50*c15294c1SRichard Henderson  * @ptr_mmu_idx: the addressing regime to use for the virtual address
51*c15294c1SRichard Henderson  * @ptr: the virtual address for which to look up tag memory
52*c15294c1SRichard Henderson  * @ptr_access: the access to use for the virtual address
53*c15294c1SRichard Henderson  * @ptr_size: the number of bytes in the normal memory access
54*c15294c1SRichard Henderson  * @tag_access: the access to use for the tag memory
55*c15294c1SRichard Henderson  * @tag_size: the number of bytes in the tag memory access
56*c15294c1SRichard Henderson  * @ra: the return address for exception handling
57*c15294c1SRichard Henderson  *
58*c15294c1SRichard Henderson  * Our tag memory is formatted as a sequence of little-endian nibbles.
59*c15294c1SRichard Henderson  * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
60*c15294c1SRichard Henderson  * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
61*c15294c1SRichard Henderson  * for the higher addr.
62*c15294c1SRichard Henderson  *
63*c15294c1SRichard Henderson  * Here, resolve the physical address from the virtual address, and return
64*c15294c1SRichard Henderson  * a pointer to the corresponding tag byte.  Exit with exception if the
65*c15294c1SRichard Henderson  * virtual address is not accessible for @ptr_access.
66*c15294c1SRichard Henderson  *
67*c15294c1SRichard Henderson  * The @ptr_size and @tag_size values may not have an obvious relation
68*c15294c1SRichard Henderson  * due to the alignment of @ptr, and the number of tag checks required.
69*c15294c1SRichard Henderson  *
70*c15294c1SRichard Henderson  * If there is no tag storage corresponding to @ptr, return NULL.
71*c15294c1SRichard Henderson  */
72*c15294c1SRichard Henderson static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
73*c15294c1SRichard Henderson                                    uint64_t ptr, MMUAccessType ptr_access,
74*c15294c1SRichard Henderson                                    int ptr_size, MMUAccessType tag_access,
75*c15294c1SRichard Henderson                                    int tag_size, uintptr_t ra)
76*c15294c1SRichard Henderson {
77*c15294c1SRichard Henderson     /* Tag storage not implemented.  */
78*c15294c1SRichard Henderson     return NULL;
79*c15294c1SRichard Henderson }
80*c15294c1SRichard Henderson 
81da54941fSRichard Henderson uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
82da54941fSRichard Henderson {
83da54941fSRichard Henderson     int rtag;
84da54941fSRichard Henderson 
85da54941fSRichard Henderson     /*
86da54941fSRichard Henderson      * Our IMPDEF choice for GCR_EL1.RRND==1 is to behave as if
87da54941fSRichard Henderson      * GCR_EL1.RRND==0, always producing deterministic results.
88da54941fSRichard Henderson      */
89da54941fSRichard Henderson     uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
90da54941fSRichard Henderson     int start = extract32(env->cp15.rgsr_el1, 0, 4);
91da54941fSRichard Henderson     int seed = extract32(env->cp15.rgsr_el1, 8, 16);
92da54941fSRichard Henderson     int offset, i;
93da54941fSRichard Henderson 
94da54941fSRichard Henderson     /* RandomTag */
95da54941fSRichard Henderson     for (i = offset = 0; i < 4; ++i) {
96da54941fSRichard Henderson         /* NextRandomTagBit */
97da54941fSRichard Henderson         int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
98da54941fSRichard Henderson                    extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
99da54941fSRichard Henderson         seed = (top << 15) | (seed >> 1);
100da54941fSRichard Henderson         offset |= top << i;
101da54941fSRichard Henderson     }
102da54941fSRichard Henderson     rtag = choose_nonexcluded_tag(start, offset, exclude);
103da54941fSRichard Henderson     env->cp15.rgsr_el1 = rtag | (seed << 8);
104da54941fSRichard Henderson 
105da54941fSRichard Henderson     return address_with_allocation_tag(rn, rtag);
106da54941fSRichard Henderson }
107efbc78adSRichard Henderson 
108efbc78adSRichard Henderson uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
109efbc78adSRichard Henderson                          int32_t offset, uint32_t tag_offset)
110efbc78adSRichard Henderson {
111efbc78adSRichard Henderson     int start_tag = allocation_tag_from_addr(ptr);
112efbc78adSRichard Henderson     uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
113efbc78adSRichard Henderson     int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);
114efbc78adSRichard Henderson 
115efbc78adSRichard Henderson     return address_with_allocation_tag(ptr + offset, rtag);
116efbc78adSRichard Henderson }
117*c15294c1SRichard Henderson 
118*c15294c1SRichard Henderson static int load_tag1(uint64_t ptr, uint8_t *mem)
119*c15294c1SRichard Henderson {
120*c15294c1SRichard Henderson     int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
121*c15294c1SRichard Henderson     return extract32(*mem, ofs, 4);
122*c15294c1SRichard Henderson }
123*c15294c1SRichard Henderson 
124*c15294c1SRichard Henderson uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
125*c15294c1SRichard Henderson {
126*c15294c1SRichard Henderson     int mmu_idx = cpu_mmu_index(env, false);
127*c15294c1SRichard Henderson     uint8_t *mem;
128*c15294c1SRichard Henderson     int rtag = 0;
129*c15294c1SRichard Henderson 
130*c15294c1SRichard Henderson     /* Trap if accessing an invalid page.  */
131*c15294c1SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
132*c15294c1SRichard Henderson                              MMU_DATA_LOAD, 1, GETPC());
133*c15294c1SRichard Henderson 
134*c15294c1SRichard Henderson     /* Load if page supports tags. */
135*c15294c1SRichard Henderson     if (mem) {
136*c15294c1SRichard Henderson         rtag = load_tag1(ptr, mem);
137*c15294c1SRichard Henderson     }
138*c15294c1SRichard Henderson 
139*c15294c1SRichard Henderson     return address_with_allocation_tag(xt, rtag);
140*c15294c1SRichard Henderson }
141*c15294c1SRichard Henderson 
142*c15294c1SRichard Henderson static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
143*c15294c1SRichard Henderson {
144*c15294c1SRichard Henderson     if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
145*c15294c1SRichard Henderson         arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
146*c15294c1SRichard Henderson                                     cpu_mmu_index(env, false), ra);
147*c15294c1SRichard Henderson         g_assert_not_reached();
148*c15294c1SRichard Henderson     }
149*c15294c1SRichard Henderson }
150*c15294c1SRichard Henderson 
151*c15294c1SRichard Henderson /* For use in a non-parallel context, store to the given nibble.  */
152*c15294c1SRichard Henderson static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
153*c15294c1SRichard Henderson {
154*c15294c1SRichard Henderson     int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
155*c15294c1SRichard Henderson     *mem = deposit32(*mem, ofs, 4, tag);
156*c15294c1SRichard Henderson }
157*c15294c1SRichard Henderson 
158*c15294c1SRichard Henderson /* For use in a parallel context, atomically store to the given nibble.  */
159*c15294c1SRichard Henderson static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
160*c15294c1SRichard Henderson {
161*c15294c1SRichard Henderson     int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
162*c15294c1SRichard Henderson     uint8_t old = atomic_read(mem);
163*c15294c1SRichard Henderson 
164*c15294c1SRichard Henderson     while (1) {
165*c15294c1SRichard Henderson         uint8_t new = deposit32(old, ofs, 4, tag);
166*c15294c1SRichard Henderson         uint8_t cmp = atomic_cmpxchg(mem, old, new);
167*c15294c1SRichard Henderson         if (likely(cmp == old)) {
168*c15294c1SRichard Henderson             return;
169*c15294c1SRichard Henderson         }
170*c15294c1SRichard Henderson         old = cmp;
171*c15294c1SRichard Henderson     }
172*c15294c1SRichard Henderson }
173*c15294c1SRichard Henderson 
174*c15294c1SRichard Henderson typedef void stg_store1(uint64_t, uint8_t *, int);
175*c15294c1SRichard Henderson 
176*c15294c1SRichard Henderson static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
177*c15294c1SRichard Henderson                           uintptr_t ra, stg_store1 store1)
178*c15294c1SRichard Henderson {
179*c15294c1SRichard Henderson     int mmu_idx = cpu_mmu_index(env, false);
180*c15294c1SRichard Henderson     uint8_t *mem;
181*c15294c1SRichard Henderson 
182*c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
183*c15294c1SRichard Henderson 
184*c15294c1SRichard Henderson     /* Trap if accessing an invalid page.  */
185*c15294c1SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
186*c15294c1SRichard Henderson                              MMU_DATA_STORE, 1, ra);
187*c15294c1SRichard Henderson 
188*c15294c1SRichard Henderson     /* Store if page supports tags. */
189*c15294c1SRichard Henderson     if (mem) {
190*c15294c1SRichard Henderson         store1(ptr, mem, allocation_tag_from_addr(xt));
191*c15294c1SRichard Henderson     }
192*c15294c1SRichard Henderson }
193*c15294c1SRichard Henderson 
/* STG: store the allocation tag of @xt to the granule at @ptr. */
void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    /* GETPC() is taken here, in the outermost helper, for unwinding. */
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}
198*c15294c1SRichard Henderson 
/* STG under parallel execution: same as STG but with an atomic nibble store. */
void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}
203*c15294c1SRichard Henderson 
204*c15294c1SRichard Henderson void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
205*c15294c1SRichard Henderson {
206*c15294c1SRichard Henderson     int mmu_idx = cpu_mmu_index(env, false);
207*c15294c1SRichard Henderson     uintptr_t ra = GETPC();
208*c15294c1SRichard Henderson 
209*c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
210*c15294c1SRichard Henderson     probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
211*c15294c1SRichard Henderson }
212*c15294c1SRichard Henderson 
/*
 * Common path for ST2G and its parallel variant: store the allocation
 * tag of @xt to the two consecutive granules starting at @ptr.
 */
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            /*
             * Only the granule-select bit of the first argument matters
             * to store1: TAG_GRANULE selects the high nibble of mem1.
             */
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            /* 0 selects the low nibble of the following tag byte. */
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            /*
             * Both nibbles receive the same tag, so a single byte store
             * covers both granules; it is a plain atomic_set even for
             * the parallel variant.
             */
            tag |= tag << 4;
            atomic_set(mem1, tag);
        }
    }
}
251*c15294c1SRichard Henderson 
/* ST2G: store the allocation tag of @xt to two granules starting at @ptr. */
void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    /* GETPC() is taken here, in the outermost helper, for unwinding. */
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}
256*c15294c1SRichard Henderson 
/* ST2G under parallel execution: uses the atomic nibble store. */
void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}
261*c15294c1SRichard Henderson 
262*c15294c1SRichard Henderson void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
263*c15294c1SRichard Henderson {
264*c15294c1SRichard Henderson     int mmu_idx = cpu_mmu_index(env, false);
265*c15294c1SRichard Henderson     uintptr_t ra = GETPC();
266*c15294c1SRichard Henderson     int in_page = -(ptr | TARGET_PAGE_MASK);
267*c15294c1SRichard Henderson 
268*c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
269*c15294c1SRichard Henderson 
270*c15294c1SRichard Henderson     if (likely(in_page >= 2 * TAG_GRANULE)) {
271*c15294c1SRichard Henderson         probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
272*c15294c1SRichard Henderson     } else {
273*c15294c1SRichard Henderson         probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
274*c15294c1SRichard Henderson         probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
275*c15294c1SRichard Henderson     }
276*c15294c1SRichard Henderson }
277