/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_USER_ONLY
#include "user/cpu_loop.h"
#include "user/page-protection.h"
#else
#include "exec/ram_addr.h"
#endif
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "hw/core/tcg-cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "mte_helper.h"


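/*
 * Choose a tag for IRG/ADDG/SUBG: starting from @tag, advance @offset
 * times to the next tag whose bit is clear in @exclude (for
 * @offset == 0, take the first non-excluded tag at or after @tag),
 * wrapping mod 16.  If all 16 tags are excluded, return 0.  This
 * follows the Arm pseudocode ChooseNonExcludedTag.
 */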
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

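/*
 * Resolve @ptr into tag memory and return a pointer to the tag byte
 * covering it, or NULL if the location has no tag storage (user-only:
 * the page lacks PAGE_ANON or PAGE_MTE; system: the page is not Tagged
 * Normal memory, or is not backed by host ram).  One tag byte holds
 * the two 4-bit tags of a 32-byte pair of granules.  @probe requests
 * that no exception be raised; @probe with a non-zero @ra is invalid,
 * and @ra == 0 without @probe marks a no-fault-expected probe (see the
 * comment on probe_access_full below).
 */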
uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
                                  uint64_t ptr, MMUAccessType ptr_access,
                                  int ptr_size, MMUAccessType tag_access,
                                  bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    assert(!(probe && ra));

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        if (probe) {
            return NULL;
        }
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is either a pure probe or a no-fault-expected probe.
     * Indicate to probe_access_flags no-fault, then either return NULL
     * for the pure probe, or assert that we received a valid page for the
     * no-fault-expected probe.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    if (probe && (flags & TLB_INVALID_MASK)) {
        return NULL;
    }
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->extra.arm.pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
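    /* One tag byte covers two 16-byte granules, so tag space is 1/32 the size. */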

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   uintptr_t ra)
{
    return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
                                    ptr_size, tag_access, false, ra);
}

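/*
 * IRG: insert a random allocation tag into @rn.  Tags present in
 * GCR_EL1.Exclude or in @rm are excluded; the generator below is the
 * deterministic LFSR of the Arm RandomTag/NextRandomTagBit pseudocode,
 * seeded from and persisted to RGSR_EL1.
 */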
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

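/*
 * ADDG/SUBG: advance the address by @offset and replace its allocation
 * tag with the @tag_offset'th non-excluded tag after the current one.
 */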
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

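/* Load the tag nibble for @ptr: even granules use bits [3:0], odd granules [7:4]. */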
int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    arm_env_mmu_index(env), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

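/*
 * STG when the tag is not actually being stored (this appears to be
 * the !allocation_tag_access_enabled case noted in do_st2g below):
 * still perform the alignment check and probe the store address.
 */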
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = arm_env_mmu_index(env);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

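/*
 * LDGM: load multiple tags into Xt.  The block size, and thus the
 * number of tags transferred, comes from cpu->gm_blocksize
 * (architecturally, GMID_EL1.BS).
 */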
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    uint64_t ret;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    /*
     * The ordering of elements within the word corresponds to
     * a little-endian operation.  Computation of shift comes from
     *
     *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
     *     data<index*4+3:index*4> = tag
     *
     * Because of the alignment of ptr above, BS=6 has shift=0.
     * All memory operations are aligned.  Defer support for BS=2,
     * requiring insertion or extraction of a nibble, until we
     * support a cpu that requires it.
     */
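    /*
     * For example, with BS=4 the block is 64 bytes and ptr was aligned
     * down to 64 above, so ptr<5:4> are zero: shift = ptr<7:4> * 4 is
     * one of {0, 16, 32, 48}, placing the 16 tag bits in that slice
     * of the 64-bit result.
     */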
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        ret = *(uint8_t *)tag_mem;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        ret = cpu_to_le16(*(uint16_t *)tag_mem);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        ret = cpu_to_le32(*(uint32_t *)tag_mem);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        return cpu_to_le64(*(uint64_t *)tag_mem);
    default:
        /*
         * CPU configured with unsupported/invalid gm blocksize.
         * This is detected early in arm_cpu_realizefn.
         */
        g_assert_not_reached();
    }
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    return ret << shift;
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = arm_env_mmu_index(env);
    uintptr_t ra = GETPC();
    int gm_bs = env_archcpu(env)->gm_blocksize;
    int gm_bs_bytes = 4 << gm_bs;
    void *tag_mem;
    int shift;

    ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 gm_bs_bytes, MMU_DATA_LOAD, ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    /* See LDGM for comments on BS and on shift.  */
    shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
    val >>= shift;
    switch (gm_bs) {
    case 3:
        /* 32 bytes -> 2 tags -> 8 result bits */
        *(uint8_t *)tag_mem = val;
        break;
    case 4:
        /* 64 bytes -> 4 tags -> 16 result bits */
        *(uint16_t *)tag_mem = cpu_to_le16(val);
        break;
    case 5:
        /* 128 bytes -> 8 tags -> 32 result bits */
        *(uint32_t *)tag_mem = cpu_to_le32(val);
        break;
    case 6:
        /* 256 bytes -> 16 tags -> 64 result bits */
        *(uint64_t *)tag_mem = cpu_to_le64(val);
        break;
    default:
        /* cpu configured with unsupported gm blocksize. */
        g_assert_not_reached();
    }
}

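/*
 * STZGM: write the tag in @val to every granule of one DC-zero block;
 * zeroing of the data itself is handled separately.
 */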
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = arm_env_mmu_index(env);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
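    /* FSC 0x11 is the synchronous Tag Check Fault code. */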
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
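    /* Set TFSR_ELx.TF0 or TF1, selected by the faulting address range. */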
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
void mte_check_fail(CPUARMState *env, uint32_t desc,
                    uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

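    /* SCTLR_ELx.TCF0 (EL0 accesses) is bits [39:38]; TCF is bits [41:40]. */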
    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/**
 * checkNrev:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * This is like checkN, but it runs backwards, checking the
 * tags starting with @mem and then the tags preceding it.
 * This is needed by the backwards-memory-copying operations.
 */
static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem-- ^ cmp;

    if (!odd) {
        goto start_even;
    }

    while (1) {
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_even:
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem-- ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint32_t sizem1, tag_count, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
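    /*
     * For example, a 16-byte access with ptr % 16 == 8 ends at
     * ptr + 15 in the following granule: tag_last - tag_first == 16
     * and tag_count == 2.
     */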
8115add8248SRichard Henderson 
8125add8248SRichard Henderson     /* Locate the page boundaries. */
8135add8248SRichard Henderson     prev_page = ptr & TARGET_PAGE_MASK;
8145add8248SRichard Henderson     next_page = prev_page + TARGET_PAGE_SIZE;
8155add8248SRichard Henderson 
816d3327a38SRichard Henderson     if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
8175add8248SRichard Henderson         /* Memory access stays on one page. */
81828f32503SRichard Henderson         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
8190b5ad31dSPeter Maydell                                   MMU_DATA_LOAD, ra);
8205add8248SRichard Henderson         if (!mem1) {
821f8c8a860SRichard Henderson             return 1;
8225add8248SRichard Henderson         }
8235add8248SRichard Henderson         /* Perform all of the comparisons. */
8245add8248SRichard Henderson         n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
8255add8248SRichard Henderson     } else {
8265add8248SRichard Henderson         /* Memory access crosses to next page. */
8275add8248SRichard Henderson         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
8280b5ad31dSPeter Maydell                                   MMU_DATA_LOAD, ra);
8295add8248SRichard Henderson 
8305add8248SRichard Henderson         mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
83198f96050SRichard Henderson                                   ptr_last - next_page + 1,
8320b5ad31dSPeter Maydell                                   MMU_DATA_LOAD, ra);
8335add8248SRichard Henderson 
8345add8248SRichard Henderson         /*
8355add8248SRichard Henderson          * Perform all of the comparisons.
8365add8248SRichard Henderson          * Note the possible but unlikely case of the operation spanning
8375add8248SRichard Henderson          * two pages that do not both have tagging enabled.
8385add8248SRichard Henderson          */
8395add8248SRichard Henderson         n = c = (next_page - tag_first) / TAG_GRANULE;
8405add8248SRichard Henderson         if (mem1) {
8415add8248SRichard Henderson             n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
8425add8248SRichard Henderson         }
8435add8248SRichard Henderson         if (n == c) {
8445add8248SRichard Henderson             if (!mem2) {
845f8c8a860SRichard Henderson                 return 1;
8465add8248SRichard Henderson             }
8475add8248SRichard Henderson             n += checkN(mem2, 0, ptr_tag, tag_count - c);
8485add8248SRichard Henderson         }
8495add8248SRichard Henderson     }
8505add8248SRichard Henderson 
851f8c8a860SRichard Henderson     if (likely(n == tag_count)) {
852f8c8a860SRichard Henderson         return 1;
853f8c8a860SRichard Henderson     }
854f8c8a860SRichard Henderson 
8555add8248SRichard Henderson     /*
85698f96050SRichard Henderson      * If we failed, we know which granule.  For the first granule, the
85798f96050SRichard Henderson      * failure address is @ptr, the first byte accessed.  Otherwise the
85898f96050SRichard Henderson      * failure address is the first byte of the nth granule.
8595add8248SRichard Henderson      */
860f8c8a860SRichard Henderson     if (n > 0) {
861f8c8a860SRichard Henderson         *fault = tag_first + n * TAG_GRANULE;
862f8c8a860SRichard Henderson     }
863f8c8a860SRichard Henderson     return 0;
8645add8248SRichard Henderson }
8655add8248SRichard Henderson 
866bd47b61cSRichard Henderson uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
867f8c8a860SRichard Henderson {
868f8c8a860SRichard Henderson     uint64_t fault;
86928f32503SRichard Henderson     int ret = mte_probe_int(env, desc, ptr, ra, &fault);
870f8c8a860SRichard Henderson 
871f8c8a860SRichard Henderson     if (unlikely(ret == 0)) {
872f8c8a860SRichard Henderson         mte_check_fail(env, desc, fault, ra);
873f8c8a860SRichard Henderson     } else if (ret < 0) {
874f8c8a860SRichard Henderson         return ptr;
875f8c8a860SRichard Henderson     }
8765add8248SRichard Henderson     return useronly_clean_ptr(ptr);
8775add8248SRichard Henderson }
8785add8248SRichard Henderson 
879bd47b61cSRichard Henderson uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
88073ceeb00SRichard Henderson {
881523da6b9SRichard Henderson     /*
882523da6b9SRichard Henderson      * R_XCHFJ: Alignment check not caused by memory type is priority 1,
883523da6b9SRichard Henderson      * higher than any translation fault.  When MTE is disabled, tcg
884523da6b9SRichard Henderson      * performs the alignment check during the code generated for the
885523da6b9SRichard Henderson      * memory access.  With MTE enabled, we must check this here before
886523da6b9SRichard Henderson      * raising any translation fault in allocation_tag_mem.
887523da6b9SRichard Henderson      */
888523da6b9SRichard Henderson     unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
889523da6b9SRichard Henderson     if (unlikely(align)) {
890523da6b9SRichard Henderson         align = (1u << align) - 1;
891523da6b9SRichard Henderson         if (unlikely(ptr & align)) {
892523da6b9SRichard Henderson             int idx = FIELD_EX32(desc, MTEDESC, MIDX);
893523da6b9SRichard Henderson             bool w = FIELD_EX32(desc, MTEDESC, WRITE);
894523da6b9SRichard Henderson             MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
895523da6b9SRichard Henderson             arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
896523da6b9SRichard Henderson         }
897523da6b9SRichard Henderson     }
898523da6b9SRichard Henderson 
899bd47b61cSRichard Henderson     return mte_check(env, desc, ptr, GETPC());
9004a09a213SRichard Henderson }
9014a09a213SRichard Henderson 
9024a09a213SRichard Henderson /*
903d304d280SRichard Henderson  * No-fault version of mte_check, to be used by SVE for MemSingleNF.
9044a09a213SRichard Henderson  * Returns false if the access is Checked and the check failed.  This
9054a09a213SRichard Henderson  * is only intended to probe the tag -- the validity of the page must
9064a09a213SRichard Henderson  * be checked beforehand.
9074a09a213SRichard Henderson  */
908d304d280SRichard Henderson bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
9094a09a213SRichard Henderson {
9104a09a213SRichard Henderson     uint64_t fault;
91128f32503SRichard Henderson     int ret = mte_probe_int(env, desc, ptr, 0, &fault);
9124a09a213SRichard Henderson 
9134a09a213SRichard Henderson     return ret != 0;
9144a09a213SRichard Henderson }
9154a09a213SRichard Henderson 
91646dc1bc0SRichard Henderson /*
91746dc1bc0SRichard Henderson  * Perform an MTE checked access for DC_ZVA.
91846dc1bc0SRichard Henderson  */
91946dc1bc0SRichard Henderson uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
92046dc1bc0SRichard Henderson {
92146dc1bc0SRichard Henderson     uintptr_t ra = GETPC();
92246dc1bc0SRichard Henderson     int log2_dcz_bytes, log2_tag_bytes;
92346dc1bc0SRichard Henderson     int mmu_idx, bit55;
92446dc1bc0SRichard Henderson     intptr_t dcz_bytes, tag_bytes, i;
92546dc1bc0SRichard Henderson     void *mem;
92646dc1bc0SRichard Henderson     uint64_t ptr_tag, mem_tag, align_ptr;
92746dc1bc0SRichard Henderson 
92846dc1bc0SRichard Henderson     bit55 = extract64(ptr, 55, 1);
92946dc1bc0SRichard Henderson 
93046dc1bc0SRichard Henderson     /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
93146dc1bc0SRichard Henderson     if (unlikely(!tbi_check(desc, bit55))) {
93246dc1bc0SRichard Henderson         return ptr;
93346dc1bc0SRichard Henderson     }
93446dc1bc0SRichard Henderson 
93546dc1bc0SRichard Henderson     ptr_tag = allocation_tag_from_addr(ptr);
93646dc1bc0SRichard Henderson 
93746dc1bc0SRichard Henderson     if (tcma_check(desc, bit55, ptr_tag)) {
93846dc1bc0SRichard Henderson         goto done;
93946dc1bc0SRichard Henderson     }
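    /*
     * tcma_check() above returns true when the pointer carries the
     * match-all tag, e.g. (illustrative) with TCMA1 set and bit55 == 1
     * a pointer tag of 0b1111 makes the access Unchecked, so the tag
     * comparison below is skipped entirely.
     */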
94046dc1bc0SRichard Henderson 
94146dc1bc0SRichard Henderson     /*
94246dc1bc0SRichard Henderson      * In arm_cpu_realizefn, we asserted that the dcz block size is at least
94346dc1bc0SRichard Henderson      * 2 * TAG_GRANULE, i.e. 32 bytes (an unreasonably small dcz anyway), to
94446dc1bc0SRichard Henderson      * make sure that we can always access one complete tag byte here.
94546dc1bc0SRichard Henderson      */
94646dc1bc0SRichard Henderson     log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
94746dc1bc0SRichard Henderson     log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
94846dc1bc0SRichard Henderson     dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
94946dc1bc0SRichard Henderson     tag_bytes = (intptr_t)1 << log2_tag_bytes;
95046dc1bc0SRichard Henderson     align_ptr = ptr & -dcz_bytes;
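    /*
     * Worked example (illustrative): for the common 64-byte block,
     * dcz_blocksize == 4 (16 words), so log2_dcz_bytes == 6 and
     * log2_tag_bytes == 6 - 5 == 1: the block covers 4 tag granules,
     * read below as tag_bytes == 2 bytes of tag memory.
     */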
95146dc1bc0SRichard Henderson 
95246dc1bc0SRichard Henderson     /*
95346dc1bc0SRichard Henderson      * Trap if accessing an invalid page.  DC_ZVA requires that we supply
95446dc1bc0SRichard Henderson      * the original pointer for an invalid page.  But watchpoints require
95546dc1bc0SRichard Henderson      * that we probe the actual space.  So do both.
95646dc1bc0SRichard Henderson      */
95746dc1bc0SRichard Henderson     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
95846dc1bc0SRichard Henderson     (void) probe_write(env, ptr, 1, mmu_idx, ra);
95946dc1bc0SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
9600b5ad31dSPeter Maydell                              dcz_bytes, MMU_DATA_LOAD, ra);
96146dc1bc0SRichard Henderson     if (!mem) {
96246dc1bc0SRichard Henderson         goto done;
96346dc1bc0SRichard Henderson     }
96446dc1bc0SRichard Henderson 
96546dc1bc0SRichard Henderson     /*
96646dc1bc0SRichard Henderson      * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
96746dc1bc0SRichard Henderson      * it is quite easy to perform all of the comparisons at once without
96846dc1bc0SRichard Henderson      * any extra masking.
96946dc1bc0SRichard Henderson      *
97046dc1bc0SRichard Henderson      * The most common zva block size is 64; some of the ThunderX CPUs use
97146dc1bc0SRichard Henderson      * a block size of 128.  For user-only, aarch64_max_initfn will set the
97246dc1bc0SRichard Henderson      * block size to 512.  Fill out the other cases for future-proofing.
97346dc1bc0SRichard Henderson      *
97446dc1bc0SRichard Henderson      * In order to be able to find the first miscompare later, we want the
97546dc1bc0SRichard Henderson      * tag bytes to be in little-endian order.
97646dc1bc0SRichard Henderson      */
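    /*
     * Worked example (illustrative): for a logical tag of 0x5 and a
     * 64-byte block, ptr_tag * 0x1111 == 0x5555 replicates the tag
     * into all four nibble positions, so one 16-bit comparison against
     * the two tag-memory bytes checks all four granules at once.
     */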
97746dc1bc0SRichard Henderson     switch (log2_tag_bytes) {
97846dc1bc0SRichard Henderson     case 0: /* zva_blocksize 32 */
97946dc1bc0SRichard Henderson         mem_tag = *(uint8_t *)mem;
98046dc1bc0SRichard Henderson         ptr_tag *= 0x11u;
98146dc1bc0SRichard Henderson         break;
98246dc1bc0SRichard Henderson     case 1: /* zva_blocksize 64 */
98346dc1bc0SRichard Henderson         mem_tag = cpu_to_le16(*(uint16_t *)mem);
98446dc1bc0SRichard Henderson         ptr_tag *= 0x1111u;
98546dc1bc0SRichard Henderson         break;
98646dc1bc0SRichard Henderson     case 2: /* zva_blocksize 128 */
98746dc1bc0SRichard Henderson         mem_tag = cpu_to_le32(*(uint32_t *)mem);
98846dc1bc0SRichard Henderson         ptr_tag *= 0x11111111u;
98946dc1bc0SRichard Henderson         break;
99046dc1bc0SRichard Henderson     case 3: /* zva_blocksize 256 */
99146dc1bc0SRichard Henderson         mem_tag = cpu_to_le64(*(uint64_t *)mem);
99246dc1bc0SRichard Henderson         ptr_tag *= 0x1111111111111111ull;
99346dc1bc0SRichard Henderson         break;
99446dc1bc0SRichard Henderson 
99546dc1bc0SRichard Henderson     default: /* zva_blocksize 512, 1024, 2048 */
99646dc1bc0SRichard Henderson         ptr_tag *= 0x1111111111111111ull;
99746dc1bc0SRichard Henderson         i = 0;
99846dc1bc0SRichard Henderson         do {
99946dc1bc0SRichard Henderson             mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
100046dc1bc0SRichard Henderson             if (unlikely(mem_tag != ptr_tag)) {
100146dc1bc0SRichard Henderson                 goto fail;
100246dc1bc0SRichard Henderson             }
100346dc1bc0SRichard Henderson             i += 8;
100446dc1bc0SRichard Henderson             align_ptr += 16 * TAG_GRANULE;
100546dc1bc0SRichard Henderson         } while (i < tag_bytes);
100646dc1bc0SRichard Henderson         goto done;
100746dc1bc0SRichard Henderson     }
100846dc1bc0SRichard Henderson 
100946dc1bc0SRichard Henderson     if (likely(mem_tag == ptr_tag)) {
101046dc1bc0SRichard Henderson         goto done;
101146dc1bc0SRichard Henderson     }
101246dc1bc0SRichard Henderson 
101346dc1bc0SRichard Henderson  fail:
101446dc1bc0SRichard Henderson     /* Locate the first nibble that differs. */
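    /*
     * Example (illustrative): with ptr_tag replicated to 0x5555 and
     * mem_tag == 0x5755, the xor is 0x0200; ctz64 returns bit 9, and
     * 9 >> 2 selects nibble 2, i.e. the third 16-byte granule.
     */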
101546dc1bc0SRichard Henderson     i = ctz64(mem_tag ^ ptr_tag) >> 2;
1016dbf8c321SRichard Henderson     mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);
101746dc1bc0SRichard Henderson 
101846dc1bc0SRichard Henderson  done:
101946dc1bc0SRichard Henderson     return useronly_clean_ptr(ptr);
102046dc1bc0SRichard Henderson }
102181639989SPeter Maydell 
102281639989SPeter Maydell uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
102381639989SPeter Maydell                         uint32_t desc)
102481639989SPeter Maydell {
102581639989SPeter Maydell     int mmu_idx, tag_count;
102681639989SPeter Maydell     uint64_t ptr_tag, tag_first, tag_last;
102781639989SPeter Maydell     void *mem;
102881639989SPeter Maydell     bool w = FIELD_EX32(desc, MTEDESC, WRITE);
102981639989SPeter Maydell     uint32_t n;
103081639989SPeter Maydell 
103181639989SPeter Maydell     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
103281639989SPeter Maydell     /* True probe; this will never fault */
103381639989SPeter Maydell     mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
103481639989SPeter Maydell                                    w ? MMU_DATA_STORE : MMU_DATA_LOAD,
103581639989SPeter Maydell                                    size, MMU_DATA_LOAD, true, 0);
103681639989SPeter Maydell     if (!mem) {
103781639989SPeter Maydell         return size;
103881639989SPeter Maydell     }
103981639989SPeter Maydell 
104081639989SPeter Maydell     /*
104181639989SPeter Maydell      * TODO: checkN() is not designed for checks of the size we expect
104281639989SPeter Maydell      * for FEAT_MOPS operations, so we should implement this differently.
104381639989SPeter Maydell      * Maybe we should do something like
104481639989SPeter Maydell      *   if (region start and size are aligned nicely) {
104581639989SPeter Maydell      *      do direct loads of 64 tag bits at a time;
104681639989SPeter Maydell      *   } else {
104781639989SPeter Maydell      *      call checkN()
104881639989SPeter Maydell      *   }
104981639989SPeter Maydell      */
105081639989SPeter Maydell     /* Round the bounds to the tag granule, and compute the number of tags. */
105181639989SPeter Maydell     ptr_tag = allocation_tag_from_addr(ptr);
105281639989SPeter Maydell     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
105381639989SPeter Maydell     tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
105481639989SPeter Maydell     tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
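    /*
     * Worked example (illustrative): ptr == 0x1008, size == 0x30 gives
     * tag_first == 0x1000 and tag_last == QEMU_ALIGN_DOWN(0x1037, 16)
     * == 0x1030, so tag_count == 4: the granules at 0x1000, 0x1010,
     * 0x1020 and 0x1030 are all checked.
     */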
105581639989SPeter Maydell     n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
105681639989SPeter Maydell     if (likely(n == tag_count)) {
105781639989SPeter Maydell         return size;
105881639989SPeter Maydell     }
105981639989SPeter Maydell 
106081639989SPeter Maydell     /*
106181639989SPeter Maydell      * Failure; for the first granule, it's at @ptr. Otherwise
106281639989SPeter Maydell      * it's at the first byte of the nth granule. Calculate how
106381639989SPeter Maydell      * many bytes we can access without hitting that failure.
106481639989SPeter Maydell      */
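    /*
     * Continuing the example above (illustrative): if checkN() reports
     * n == 2, the failing granule is at 0x1020, and the access may
     * cover 2 * 16 - (0x1008 - 0x1000) == 24 bytes from ptr.
     */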
106581639989SPeter Maydell     if (n == 0) {
106681639989SPeter Maydell         return 0;
106781639989SPeter Maydell     } else {
106881639989SPeter Maydell         return n * TAG_GRANULE - (ptr - tag_first);
106981639989SPeter Maydell     }
107081639989SPeter Maydell }
10716087df57SPeter Maydell 
107269c51dc3SPeter Maydell uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
107369c51dc3SPeter Maydell                             uint32_t desc)
107469c51dc3SPeter Maydell {
107569c51dc3SPeter Maydell     int mmu_idx, tag_count;
107669c51dc3SPeter Maydell     uint64_t ptr_tag, tag_first, tag_last;
107769c51dc3SPeter Maydell     void *mem;
107869c51dc3SPeter Maydell     bool w = FIELD_EX32(desc, MTEDESC, WRITE);
107969c51dc3SPeter Maydell     uint32_t n;
108069c51dc3SPeter Maydell 
108169c51dc3SPeter Maydell     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
10824d044472SPeter Maydell     /*
10834d044472SPeter Maydell      * True probe; this will never fault. Note that our caller passes
10844d044472SPeter Maydell      * us a pointer to the end of the region, but allocation_tag_mem_probe()
10854d044472SPeter Maydell      * wants a pointer to the start. Because we know we don't span a page
10864d044472SPeter Maydell      * boundary and that allocation_tag_mem_probe() doesn't otherwise care
10874d044472SPeter Maydell      * about the size, pass in a size of 1 byte. This is simpler than
10884d044472SPeter Maydell      * adjusting the ptr to point to the start of the region and then having
10894d044472SPeter Maydell      * to adjust the returned 'mem' to get the end of the tag memory.
10904d044472SPeter Maydell      */
109169c51dc3SPeter Maydell     mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
109269c51dc3SPeter Maydell                                    w ? MMU_DATA_STORE : MMU_DATA_LOAD,
10934d044472SPeter Maydell                                    1, MMU_DATA_LOAD, true, 0);
109469c51dc3SPeter Maydell     if (!mem) {
109569c51dc3SPeter Maydell         return size;
109669c51dc3SPeter Maydell     }
109769c51dc3SPeter Maydell 
109869c51dc3SPeter Maydell     /*
109969c51dc3SPeter Maydell      * TODO: checkNrev() is not designed for checks of the size we expect
110069c51dc3SPeter Maydell      * for FEAT_MOPS operations, so we should implement this differently.
110169c51dc3SPeter Maydell      * Maybe we should do something like
110269c51dc3SPeter Maydell      *   if (region start and size are aligned nicely) {
110369c51dc3SPeter Maydell      *      do direct loads of 64 tag bits at a time;
110469c51dc3SPeter Maydell      *   } else {
110569c51dc3SPeter Maydell      *      call checkNrev()
110669c51dc3SPeter Maydell      *   }
110769c51dc3SPeter Maydell      */
110869c51dc3SPeter Maydell     /* Round the bounds to the tag granule, and compute the number of tags. */
110969c51dc3SPeter Maydell     ptr_tag = allocation_tag_from_addr(ptr);
111069c51dc3SPeter Maydell     tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
111169c51dc3SPeter Maydell     tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
111269c51dc3SPeter Maydell     tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
111369c51dc3SPeter Maydell     n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
111469c51dc3SPeter Maydell     if (likely(n == tag_count)) {
111569c51dc3SPeter Maydell         return size;
111669c51dc3SPeter Maydell     }
111769c51dc3SPeter Maydell 
111869c51dc3SPeter Maydell     /*
111969c51dc3SPeter Maydell      * Failure; for the first granule, it's at @ptr. Otherwise
112069c51dc3SPeter Maydell      * it's at the last byte of the nth granule. Calculate how
112169c51dc3SPeter Maydell      * many bytes we can access without hitting that failure.
112269c51dc3SPeter Maydell      */
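    /*
     * Worked example (illustrative): ptr == 0x102f (the last byte of
     * the region) and size == 0x30 give tag_first == 0x1000 and
     * tag_last == 0x1020, so tag_count == 3. If checkNrev() reports
     * n == 1, only the granule at 0x1020 passed, and the accessible
     * suffix is (1 - 1) * 16 + (0x1030 - 0x1020) == 16 bytes.
     */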
112369c51dc3SPeter Maydell     if (n == 0) {
112469c51dc3SPeter Maydell         return 0;
112569c51dc3SPeter Maydell     } else {
112669c51dc3SPeter Maydell         return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
112769c51dc3SPeter Maydell     }
112869c51dc3SPeter Maydell }
112969c51dc3SPeter Maydell 
11306087df57SPeter Maydell void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
11316087df57SPeter Maydell                        uint32_t desc)
11326087df57SPeter Maydell {
11336087df57SPeter Maydell     int mmu_idx, tag_count;
11346087df57SPeter Maydell     uint64_t ptr_tag;
11356087df57SPeter Maydell     void *mem;
11366087df57SPeter Maydell 
11376087df57SPeter Maydell     if (!desc) {
11386087df57SPeter Maydell         /* Tags not actually enabled */
11396087df57SPeter Maydell         return;
11406087df57SPeter Maydell     }
11416087df57SPeter Maydell 
11426087df57SPeter Maydell     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
11436087df57SPeter Maydell     /* True probe: this will never fault */
11446087df57SPeter Maydell     mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
11456087df57SPeter Maydell                                    MMU_DATA_STORE, true, 0);
11466087df57SPeter Maydell     if (!mem) {
11476087df57SPeter Maydell         return;
11486087df57SPeter Maydell     }
11496087df57SPeter Maydell 
11506087df57SPeter Maydell     /*
11516087df57SPeter Maydell      * We know that ptr and size are both TAG_GRANULE aligned; store
11526087df57SPeter Maydell      * the tag from the pointer value into the tag memory.
11536087df57SPeter Maydell      */
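    /*
     * Worked example for the code below (illustrative): ptr == 0x1010
     * with tag 0x5 and size == 0x50 give tag_count == 5.
     * ptr & TAG_GRANULE is set, so the first tag lands in the odd
     * (high) nibble of its tag byte via store_tag1_parallel(); the
     * remaining four tags are two whole bytes of 0x55 written by
     * memset(); tag_count is then even, so there is no trailing
     * nibble to store.
     */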
11546087df57SPeter Maydell     ptr_tag = allocation_tag_from_addr(ptr);
11556087df57SPeter Maydell     tag_count = size / TAG_GRANULE;
11566087df57SPeter Maydell     if (ptr & TAG_GRANULE) {
11576087df57SPeter Maydell         /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
11586087df57SPeter Maydell         store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
11596087df57SPeter Maydell         mem++;
11606087df57SPeter Maydell         tag_count--;
11616087df57SPeter Maydell     }
11626087df57SPeter Maydell     memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
11636087df57SPeter Maydell     if (tag_count & 1) {
11646087df57SPeter Maydell         /* Final trailing unaligned nibble */
11656087df57SPeter Maydell         mem += tag_count / 2;
11666087df57SPeter Maydell         store_tag1_parallel(0, mem, ptr_tag);
11676087df57SPeter Maydell     }
11686087df57SPeter Maydell }
1169