xref: /qemu/target/arm/tcg/mte_helper.c (revision fe1a3ace13a8b53fc20c74fb7e3337f754396e6b)
1da54941fSRichard Henderson /*
2da54941fSRichard Henderson  * ARM v8.5-MemTag Operations
3da54941fSRichard Henderson  *
4da54941fSRichard Henderson  * Copyright (c) 2020 Linaro, Ltd.
5da54941fSRichard Henderson  *
6da54941fSRichard Henderson  * This library is free software; you can redistribute it and/or
7da54941fSRichard Henderson  * modify it under the terms of the GNU Lesser General Public
8da54941fSRichard Henderson  * License as published by the Free Software Foundation; either
9da54941fSRichard Henderson  * version 2.1 of the License, or (at your option) any later version.
10da54941fSRichard Henderson  *
11da54941fSRichard Henderson  * This library is distributed in the hope that it will be useful,
12da54941fSRichard Henderson  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13da54941fSRichard Henderson  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14da54941fSRichard Henderson  * Lesser General Public License for more details.
15da54941fSRichard Henderson  *
16da54941fSRichard Henderson  * You should have received a copy of the GNU Lesser General Public
17da54941fSRichard Henderson  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18da54941fSRichard Henderson  */
19da54941fSRichard Henderson 
20da54941fSRichard Henderson #include "qemu/osdep.h"
21cd617484SPhilippe Mathieu-Daudé #include "qemu/log.h"
22da54941fSRichard Henderson #include "cpu.h"
23da54941fSRichard Henderson #include "internals.h"
24da54941fSRichard Henderson #include "exec/exec-all.h"
2574781c08SPhilippe Mathieu-Daudé #include "exec/page-protection.h"
2662ef949bSPhilippe Mathieu-Daudé #ifdef CONFIG_USER_ONLY
271bf0d6e4SPhilippe Mathieu-Daudé #include "user/cpu_loop.h"
2862ef949bSPhilippe Mathieu-Daudé #include "user/page-protection.h"
2962ef949bSPhilippe Mathieu-Daudé #else
304705a71dSRichard Henderson #include "system/ram_addr.h"
31a6b3f532SPhilippe Mathieu-Daudé #endif
3242fa9665SPhilippe Mathieu-Daudé #include "accel/tcg/cpu-ldst.h"
33*fe1a3aceSPhilippe Mathieu-Daudé #include "accel/tcg/probe.h"
34da54941fSRichard Henderson #include "exec/helper-proto.h"
354d43552aSPierrick Bouvier #include "exec/tlb-flags.h"
3615017436SPhilippe Mathieu-Daudé #include "accel/tcg/cpu-ops.h"
37d4f6dda1SRichard Henderson #include "qapi/error.h"
38d4f6dda1SRichard Henderson #include "qemu/guest-random.h"
390c9b437cSGustavo Romero #include "mte_helper.h"
40da54941fSRichard Henderson 
41da54941fSRichard Henderson 
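/*
 * Illustrative example (values chosen here, not from the pseudocode):
 * with exclude == 0x0006 (tags 1 and 2 excluded) and tag == 1,
 * offset == 0 advances to the first non-excluded tag, 3, while
 * offset == 2 skips two non-excluded tags and returns 4.
 */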
42da54941fSRichard Henderson static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
43da54941fSRichard Henderson {
44da54941fSRichard Henderson     if (exclude == 0xffff) {
45da54941fSRichard Henderson         return 0;
46da54941fSRichard Henderson     }
47da54941fSRichard Henderson     if (offset == 0) {
48da54941fSRichard Henderson         while (exclude & (1 << tag)) {
49da54941fSRichard Henderson             tag = (tag + 1) & 15;
50da54941fSRichard Henderson         }
51da54941fSRichard Henderson     } else {
52da54941fSRichard Henderson         do {
53da54941fSRichard Henderson             do {
54da54941fSRichard Henderson                 tag = (tag + 1) & 15;
55da54941fSRichard Henderson             } while (exclude & (1 << tag));
56da54941fSRichard Henderson         } while (--offset > 0);
57da54941fSRichard Henderson     }
58da54941fSRichard Henderson     return tag;
59da54941fSRichard Henderson }
60da54941fSRichard Henderson 
610c9b437cSGustavo Romero uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
62c15294c1SRichard Henderson                                   uint64_t ptr, MMUAccessType ptr_access,
63c15294c1SRichard Henderson                                   int ptr_size, MMUAccessType tag_access,
64aa03378bSPeter Maydell                                   bool probe, uintptr_t ra)
65c15294c1SRichard Henderson {
66e4d5bf4fSRichard Henderson #ifdef CONFIG_USER_ONLY
67a11d3830SRichard Henderson     uint64_t clean_ptr = useronly_clean_ptr(ptr);
68a11d3830SRichard Henderson     int flags = page_get_flags(clean_ptr);
69a11d3830SRichard Henderson     uint8_t *tags;
70a11d3830SRichard Henderson     uintptr_t index;
71a11d3830SRichard Henderson 
72aa03378bSPeter Maydell     assert(!(probe && ra));
73aa03378bSPeter Maydell 
74ff38bca7SRichard Henderson     if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
7541bfb670SGustavo Romero         if (probe) {
7641bfb670SGustavo Romero             return NULL;
7741bfb670SGustavo Romero         }
785e98763cSRichard Henderson         cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
795e98763cSRichard Henderson                               !(flags & PAGE_VALID), ra);
80a11d3830SRichard Henderson     }
81a11d3830SRichard Henderson 
82a11d3830SRichard Henderson     /* Require both MAP_ANON and PROT_MTE for the page. */
83a11d3830SRichard Henderson     if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
84c15294c1SRichard Henderson         return NULL;
85a11d3830SRichard Henderson     }
86a11d3830SRichard Henderson 
87a11d3830SRichard Henderson     tags = page_get_target_data(clean_ptr);
88a11d3830SRichard Henderson 
89a11d3830SRichard Henderson     index = extract32(ptr, LOG2_TAG_GRANULE + 1,
90a11d3830SRichard Henderson                       TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
91a11d3830SRichard Henderson     return tags + index;
92e4d5bf4fSRichard Henderson #else
9325d3ec58SRichard Henderson     CPUTLBEntryFull *full;
94b8967ddfSRichard Henderson     MemTxAttrs attrs;
95e4d5bf4fSRichard Henderson     int in_page, flags;
96e4d5bf4fSRichard Henderson     hwaddr ptr_paddr, tag_paddr, xlat;
97e4d5bf4fSRichard Henderson     MemoryRegion *mr;
98e4d5bf4fSRichard Henderson     ARMASIdx tag_asi;
99e4d5bf4fSRichard Henderson     AddressSpace *tag_as;
100e4d5bf4fSRichard Henderson     void *host;
101e4d5bf4fSRichard Henderson 
102e4d5bf4fSRichard Henderson     /*
103e4d5bf4fSRichard Henderson      * Probe the first byte of the virtual address.  This raises an
104e4d5bf4fSRichard Henderson      * exception for inaccessible pages, and resolves the virtual address
105e4d5bf4fSRichard Henderson      * into the softmmu tlb.
106e4d5bf4fSRichard Henderson      *
107aa03378bSPeter Maydell      * When RA == 0, this is either a pure probe or a no-fault-expected probe.
108aa03378bSPeter Maydell      * Indicate to probe_access_full no-fault, then either return NULL
109aa03378bSPeter Maydell      * for the pure probe, or assert that we received a valid page for the
110aa03378bSPeter Maydell      * no-fault-expected probe.
111e4d5bf4fSRichard Henderson      */
112d507e6c5SRichard Henderson     flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
113b8967ddfSRichard Henderson                               ra == 0, &host, &full, ra);
114aa03378bSPeter Maydell     if (probe && (flags & TLB_INVALID_MASK)) {
115aa03378bSPeter Maydell         return NULL;
116aa03378bSPeter Maydell     }
117e4d5bf4fSRichard Henderson     assert(!(flags & TLB_INVALID_MASK));
118e4d5bf4fSRichard Henderson 
119e4d5bf4fSRichard Henderson     /* If the virtual page MemAttr != Tagged, access unchecked. */
120a81fef4bSAnton Johansson     if (full->extra.arm.pte_attrs != 0xf0) {
121e4d5bf4fSRichard Henderson         return NULL;
122e4d5bf4fSRichard Henderson     }
123e4d5bf4fSRichard Henderson 
124e4d5bf4fSRichard Henderson     /*
125e4d5bf4fSRichard Henderson      * If not backed by host ram, there is no tag storage: access unchecked.
126e4d5bf4fSRichard Henderson      * This is probably a guest OS bug, though, so log it.
127e4d5bf4fSRichard Henderson      */
128e4d5bf4fSRichard Henderson     if (unlikely(flags & TLB_MMIO)) {
129e4d5bf4fSRichard Henderson         qemu_log_mask(LOG_GUEST_ERROR,
130e4d5bf4fSRichard Henderson                       "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
131e4d5bf4fSRichard Henderson                       "but is not backed by host ram\n", ptr);
132e4d5bf4fSRichard Henderson         return NULL;
133e4d5bf4fSRichard Henderson     }
134e4d5bf4fSRichard Henderson 
135e4d5bf4fSRichard Henderson     /*
136b8967ddfSRichard Henderson      * Remember these values across the second lookup below,
137b8967ddfSRichard Henderson      * which may invalidate this pointer via tlb resize.
138b8967ddfSRichard Henderson      */
13928fb921fSRichard Henderson     ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
140b8967ddfSRichard Henderson     attrs = full->attrs;
141b8967ddfSRichard Henderson     full = NULL;
142b8967ddfSRichard Henderson 
143b8967ddfSRichard Henderson     /*
144e4d5bf4fSRichard Henderson      * The Normal memory access can extend to the next page.  E.g. a single
145e4d5bf4fSRichard Henderson      * 8-byte access to the last byte of a page will check only the last
146e4d5bf4fSRichard Henderson      * tag on the first page.
147e4d5bf4fSRichard Henderson      * Any page access exception has priority over tag check exception.
148e4d5bf4fSRichard Henderson      */
149e4d5bf4fSRichard Henderson     in_page = -(ptr | TARGET_PAGE_MASK);
150e4d5bf4fSRichard Henderson     if (unlikely(ptr_size > in_page)) {
151d507e6c5SRichard Henderson         flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
152b8967ddfSRichard Henderson                                    ptr_mmu_idx, ra == 0, &host, &full, ra);
153e4d5bf4fSRichard Henderson         assert(!(flags & TLB_INVALID_MASK));
154e4d5bf4fSRichard Henderson     }
155e4d5bf4fSRichard Henderson 
156e4d5bf4fSRichard Henderson     /* Any debug exception has priority over a tag check exception. */
157aa03378bSPeter Maydell     if (!probe && unlikely(flags & TLB_WATCHPOINT)) {
158e4d5bf4fSRichard Henderson         int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
159e4d5bf4fSRichard Henderson         assert(ra != 0);
160b8967ddfSRichard Henderson         cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
161e4d5bf4fSRichard Henderson     }
162e4d5bf4fSRichard Henderson 
163e4d5bf4fSRichard Henderson     /* Convert to the physical address in tag space.  */
164e4d5bf4fSRichard Henderson     tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);
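    /*
     * One tag byte covers 2 * TAG_GRANULE == 32 bytes of data,
     * hence the shift by LOG2_TAG_GRANULE + 1 == 5.
     */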
165e4d5bf4fSRichard Henderson 
166e4d5bf4fSRichard Henderson     /* Look up the address in tag space. */
167b8967ddfSRichard Henderson     tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
168e4d5bf4fSRichard Henderson     tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
169e4d5bf4fSRichard Henderson     mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
170b8967ddfSRichard Henderson                                  tag_access == MMU_DATA_STORE, attrs);
171e4d5bf4fSRichard Henderson 
172e4d5bf4fSRichard Henderson     /*
173e4d5bf4fSRichard Henderson      * Note that @mr will never be NULL.  If there is nothing in the address
174e4d5bf4fSRichard Henderson      * space at @tag_paddr, the translation will return the unallocated memory
175e4d5bf4fSRichard Henderson      * region.  For our purposes, the result must be ram.
176e4d5bf4fSRichard Henderson      */
177e4d5bf4fSRichard Henderson     if (unlikely(!memory_region_is_ram(mr))) {
178e4d5bf4fSRichard Henderson         /* ??? Failure is a board configuration error. */
179e4d5bf4fSRichard Henderson         qemu_log_mask(LOG_UNIMP,
180e4d5bf4fSRichard Henderson                       "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
181e4d5bf4fSRichard Henderson                       "Normal Memory @ 0x%" HWADDR_PRIx "\n",
182e4d5bf4fSRichard Henderson                       tag_paddr, ptr_paddr);
183e4d5bf4fSRichard Henderson         return NULL;
184e4d5bf4fSRichard Henderson     }
185e4d5bf4fSRichard Henderson 
186e4d5bf4fSRichard Henderson     /*
187e4d5bf4fSRichard Henderson      * Ensure the tag memory is dirty on write, for migration.
188e4d5bf4fSRichard Henderson      * Tag memory can never contain code or display memory (vga).
189e4d5bf4fSRichard Henderson      */
190e4d5bf4fSRichard Henderson     if (tag_access == MMU_DATA_STORE) {
191e4d5bf4fSRichard Henderson         ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
192e4d5bf4fSRichard Henderson         cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
193e4d5bf4fSRichard Henderson     }
194e4d5bf4fSRichard Henderson 
195e4d5bf4fSRichard Henderson     return memory_region_get_ram_ptr(mr) + xlat;
196e4d5bf4fSRichard Henderson #endif
197c15294c1SRichard Henderson }
198c15294c1SRichard Henderson 
199aa03378bSPeter Maydell static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
200aa03378bSPeter Maydell                                    uint64_t ptr, MMUAccessType ptr_access,
201aa03378bSPeter Maydell                                    int ptr_size, MMUAccessType tag_access,
202aa03378bSPeter Maydell                                    uintptr_t ra)
203aa03378bSPeter Maydell {
204aa03378bSPeter Maydell     return allocation_tag_mem_probe(env, ptr_mmu_idx, ptr, ptr_access,
205aa03378bSPeter Maydell                                     ptr_size, tag_access, false, ra);
206aa03378bSPeter Maydell }
207aa03378bSPeter Maydell 
208da54941fSRichard Henderson uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
209da54941fSRichard Henderson {
210da54941fSRichard Henderson     uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
211d4f6dda1SRichard Henderson     int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
212da54941fSRichard Henderson     int start = extract32(env->cp15.rgsr_el1, 0, 4);
213da54941fSRichard Henderson     int seed = extract32(env->cp15.rgsr_el1, 8, 16);
214d4f6dda1SRichard Henderson     int offset, i, rtag;
215d4f6dda1SRichard Henderson 
216d4f6dda1SRichard Henderson     /*
217d4f6dda1SRichard Henderson      * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
218d4f6dda1SRichard Henderson      * deterministic algorithm.  Except that with RRND==1 the kernel is
219d4f6dda1SRichard Henderson      * not required to have set RGSR_EL1.SEED != 0, which is required for
220d4f6dda1SRichard Henderson      * the deterministic algorithm to function.  So we force a non-zero
221d4f6dda1SRichard Henderson      * SEED for that case.
222d4f6dda1SRichard Henderson      */
223d4f6dda1SRichard Henderson     if (unlikely(seed == 0) && rrnd) {
224d4f6dda1SRichard Henderson         do {
225d4f6dda1SRichard Henderson             Error *err = NULL;
226d4f6dda1SRichard Henderson             uint16_t two;
227d4f6dda1SRichard Henderson 
228d4f6dda1SRichard Henderson             if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
229d4f6dda1SRichard Henderson                 /*
230d4f6dda1SRichard Henderson                  * Failed, for unknown reasons in the crypto subsystem.
231d4f6dda1SRichard Henderson                  * Best we can do is log the reason and use a constant seed.
232d4f6dda1SRichard Henderson                  */
233d4f6dda1SRichard Henderson                 qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
234d4f6dda1SRichard Henderson                               error_get_pretty(err));
235d4f6dda1SRichard Henderson                 error_free(err);
236d4f6dda1SRichard Henderson                 two = 1;
237d4f6dda1SRichard Henderson             }
238d4f6dda1SRichard Henderson             seed = two;
239d4f6dda1SRichard Henderson         } while (seed == 0);
240d4f6dda1SRichard Henderson     }
241da54941fSRichard Henderson 
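    /*
     * The loop below steps a 16-bit LFSR (feedback from bits 5, 3, 2
     * and 0 into bit 15, per NextRandomTagBit) and collects the four
     * output bits as the tag offset.
     */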
242da54941fSRichard Henderson     /* RandomTag */
243da54941fSRichard Henderson     for (i = offset = 0; i < 4; ++i) {
244da54941fSRichard Henderson         /* NextRandomTagBit */
245da54941fSRichard Henderson         int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
246da54941fSRichard Henderson                    extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
247da54941fSRichard Henderson         seed = (top << 15) | (seed >> 1);
248da54941fSRichard Henderson         offset |= top << i;
249da54941fSRichard Henderson     }
250da54941fSRichard Henderson     rtag = choose_nonexcluded_tag(start, offset, exclude);
251da54941fSRichard Henderson     env->cp15.rgsr_el1 = rtag | (seed << 8);
252da54941fSRichard Henderson 
253da54941fSRichard Henderson     return address_with_allocation_tag(rn, rtag);
254da54941fSRichard Henderson }
255efbc78adSRichard Henderson 
256efbc78adSRichard Henderson uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
257efbc78adSRichard Henderson                          int32_t offset, uint32_t tag_offset)
258efbc78adSRichard Henderson {
259efbc78adSRichard Henderson     int start_tag = allocation_tag_from_addr(ptr);
260efbc78adSRichard Henderson     uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
261efbc78adSRichard Henderson     int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);
262efbc78adSRichard Henderson 
263efbc78adSRichard Henderson     return address_with_allocation_tag(ptr + offset, rtag);
264efbc78adSRichard Henderson }
265c15294c1SRichard Henderson 
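/*
 * Each tag byte holds the tags of two adjacent granules: the even
 * granule in bits [3:0] and the odd granule in bits [7:4], so bit
 * LOG2_TAG_GRANULE of the address selects the nibble.  For example,
 * a pointer with bit 4 set reads bits [7:4] of *mem.
 */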
2660c9b437cSGustavo Romero int load_tag1(uint64_t ptr, uint8_t *mem)
267c15294c1SRichard Henderson {
268c15294c1SRichard Henderson     int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
269c15294c1SRichard Henderson     return extract32(*mem, ofs, 4);
270c15294c1SRichard Henderson }
271c15294c1SRichard Henderson 
272c15294c1SRichard Henderson uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
273c15294c1SRichard Henderson {
274b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
275c15294c1SRichard Henderson     uint8_t *mem;
276c15294c1SRichard Henderson     int rtag = 0;
277c15294c1SRichard Henderson 
278c15294c1SRichard Henderson     /* Trap if accessing an invalid page.  */
279c15294c1SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
2800b5ad31dSPeter Maydell                              MMU_DATA_LOAD, GETPC());
281c15294c1SRichard Henderson 
282c15294c1SRichard Henderson     /* Load if page supports tags. */
283c15294c1SRichard Henderson     if (mem) {
284c15294c1SRichard Henderson         rtag = load_tag1(ptr, mem);
285c15294c1SRichard Henderson     }
286c15294c1SRichard Henderson 
287c15294c1SRichard Henderson     return address_with_allocation_tag(xt, rtag);
288c15294c1SRichard Henderson }
289c15294c1SRichard Henderson 
290c15294c1SRichard Henderson static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
291c15294c1SRichard Henderson {
292c15294c1SRichard Henderson     if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
293c15294c1SRichard Henderson         arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
294b7770d72SRichard Henderson                                     arm_env_mmu_index(env), ra);
295c15294c1SRichard Henderson         g_assert_not_reached();
296c15294c1SRichard Henderson     }
297c15294c1SRichard Henderson }
298c15294c1SRichard Henderson 
299c15294c1SRichard Henderson /* For use in a non-parallel context, store to the given nibble.  */
3000c9b437cSGustavo Romero void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
301c15294c1SRichard Henderson {
302c15294c1SRichard Henderson     int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
303c15294c1SRichard Henderson     *mem = deposit32(*mem, ofs, 4, tag);
304c15294c1SRichard Henderson }
305c15294c1SRichard Henderson 
306c15294c1SRichard Henderson /* For use in a parallel context, atomically store to the given nibble.  */
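/*
 * The neighbouring granule's tag shares the same byte, so a concurrent
 * update to the other nibble must not be lost; hence the cmpxchg retry
 * loop rather than a plain read-modify-write.
 */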
307c15294c1SRichard Henderson static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
308c15294c1SRichard Henderson {
309c15294c1SRichard Henderson     int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
310d73415a3SStefan Hajnoczi     uint8_t old = qatomic_read(mem);
311c15294c1SRichard Henderson 
312c15294c1SRichard Henderson     while (1) {
313c15294c1SRichard Henderson         uint8_t new = deposit32(old, ofs, 4, tag);
314d73415a3SStefan Hajnoczi         uint8_t cmp = qatomic_cmpxchg(mem, old, new);
315c15294c1SRichard Henderson         if (likely(cmp == old)) {
316c15294c1SRichard Henderson             return;
317c15294c1SRichard Henderson         }
318c15294c1SRichard Henderson         old = cmp;
319c15294c1SRichard Henderson     }
320c15294c1SRichard Henderson }
321c15294c1SRichard Henderson 
322c15294c1SRichard Henderson typedef void stg_store1(uint64_t, uint8_t *, int);
323c15294c1SRichard Henderson 
324c15294c1SRichard Henderson static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
325c15294c1SRichard Henderson                           uintptr_t ra, stg_store1 store1)
326c15294c1SRichard Henderson {
327b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
328c15294c1SRichard Henderson     uint8_t *mem;
329c15294c1SRichard Henderson 
330c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
331c15294c1SRichard Henderson 
332c15294c1SRichard Henderson     /* Trap if accessing an invalid page.  */
333c15294c1SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
3340b5ad31dSPeter Maydell                              MMU_DATA_STORE, ra);
335c15294c1SRichard Henderson 
336c15294c1SRichard Henderson     /* Store if page supports tags. */
337c15294c1SRichard Henderson     if (mem) {
338c15294c1SRichard Henderson         store1(ptr, mem, allocation_tag_from_addr(xt));
339c15294c1SRichard Henderson     }
340c15294c1SRichard Henderson }
341c15294c1SRichard Henderson 
342c15294c1SRichard Henderson void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
343c15294c1SRichard Henderson {
344c15294c1SRichard Henderson     do_stg(env, ptr, xt, GETPC(), store_tag1);
345c15294c1SRichard Henderson }
346c15294c1SRichard Henderson 
347c15294c1SRichard Henderson void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
348c15294c1SRichard Henderson {
349c15294c1SRichard Henderson     do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
350c15294c1SRichard Henderson }
351c15294c1SRichard Henderson 
352c15294c1SRichard Henderson void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
353c15294c1SRichard Henderson {
354b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
355c15294c1SRichard Henderson     uintptr_t ra = GETPC();
356c15294c1SRichard Henderson 
357c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
358c15294c1SRichard Henderson     probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
359c15294c1SRichard Henderson }
360c15294c1SRichard Henderson 
361c15294c1SRichard Henderson static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
362c15294c1SRichard Henderson                            uintptr_t ra, stg_store1 store1)
363c15294c1SRichard Henderson {
364b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
365c15294c1SRichard Henderson     int tag = allocation_tag_from_addr(xt);
366c15294c1SRichard Henderson     uint8_t *mem1, *mem2;
367c15294c1SRichard Henderson 
368c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
369c15294c1SRichard Henderson 
370c15294c1SRichard Henderson     /*
371c15294c1SRichard Henderson      * Trap if accessing invalid page(s).
372c15294c1SRichard Henderson      * This takes priority over !allocation_tag_access_enabled.
373c15294c1SRichard Henderson      */
374c15294c1SRichard Henderson     if (ptr & TAG_GRANULE) {
375c15294c1SRichard Henderson         /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
376c15294c1SRichard Henderson         mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
3770b5ad31dSPeter Maydell                                   TAG_GRANULE, MMU_DATA_STORE, ra);
378c15294c1SRichard Henderson         mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
379c15294c1SRichard Henderson                                   MMU_DATA_STORE, TAG_GRANULE,
3800b5ad31dSPeter Maydell                                   MMU_DATA_STORE, ra);
381c15294c1SRichard Henderson 
382c15294c1SRichard Henderson         /* Store if page(s) support tags. */
383c15294c1SRichard Henderson         if (mem1) {
384c15294c1SRichard Henderson             store1(TAG_GRANULE, mem1, tag);
385c15294c1SRichard Henderson         }
386c15294c1SRichard Henderson         if (mem2) {
387c15294c1SRichard Henderson             store1(0, mem2, tag);
388c15294c1SRichard Henderson         }
389c15294c1SRichard Henderson     } else {
390c15294c1SRichard Henderson         /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
391c15294c1SRichard Henderson         mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
3920b5ad31dSPeter Maydell                                   2 * TAG_GRANULE, MMU_DATA_STORE, ra);
393c15294c1SRichard Henderson         if (mem1) {
394c15294c1SRichard Henderson             tag |= tag << 4;
395d73415a3SStefan Hajnoczi             qatomic_set(mem1, tag);
396c15294c1SRichard Henderson         }
397c15294c1SRichard Henderson     }
398c15294c1SRichard Henderson }
399c15294c1SRichard Henderson 
400c15294c1SRichard Henderson void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
401c15294c1SRichard Henderson {
402c15294c1SRichard Henderson     do_st2g(env, ptr, xt, GETPC(), store_tag1);
403c15294c1SRichard Henderson }
404c15294c1SRichard Henderson 
405c15294c1SRichard Henderson void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
406c15294c1SRichard Henderson {
407c15294c1SRichard Henderson     do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
408c15294c1SRichard Henderson }
409c15294c1SRichard Henderson 
410c15294c1SRichard Henderson void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
411c15294c1SRichard Henderson {
412b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
413c15294c1SRichard Henderson     uintptr_t ra = GETPC();
414c15294c1SRichard Henderson     int in_page = -(ptr | TARGET_PAGE_MASK);
415c15294c1SRichard Henderson 
416c15294c1SRichard Henderson     check_tag_aligned(env, ptr, ra);
417c15294c1SRichard Henderson 
418c15294c1SRichard Henderson     if (likely(in_page >= 2 * TAG_GRANULE)) {
419c15294c1SRichard Henderson         probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
420c15294c1SRichard Henderson     } else {
421c15294c1SRichard Henderson         probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
422c15294c1SRichard Henderson         probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
423c15294c1SRichard Henderson     }
424c15294c1SRichard Henderson }
4255f716a82SRichard Henderson 
4265f716a82SRichard Henderson uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
4275f716a82SRichard Henderson {
428b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
4295f716a82SRichard Henderson     uintptr_t ra = GETPC();
430851ec6ebSRichard Henderson     int gm_bs = env_archcpu(env)->gm_blocksize;
431851ec6ebSRichard Henderson     int gm_bs_bytes = 4 << gm_bs;
4325f716a82SRichard Henderson     void *tag_mem;
4337134cb07SRichard Henderson     uint64_t ret;
4347134cb07SRichard Henderson     int shift;
4355f716a82SRichard Henderson 
436851ec6ebSRichard Henderson     ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
4375f716a82SRichard Henderson 
4385f716a82SRichard Henderson     /* Trap if accessing an invalid page.  */
4395f716a82SRichard Henderson     tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
4400b5ad31dSPeter Maydell                                  gm_bs_bytes, MMU_DATA_LOAD, ra);
4415f716a82SRichard Henderson 
4425f716a82SRichard Henderson     /* The tag is squashed to zero if the page does not support tags.  */
4435f716a82SRichard Henderson     if (!tag_mem) {
4445f716a82SRichard Henderson         return 0;
4455f716a82SRichard Henderson     }
4465f716a82SRichard Henderson 
4475f716a82SRichard Henderson     /*
448851ec6ebSRichard Henderson      * The ordering of elements within the word corresponds to
4497134cb07SRichard Henderson      * a little-endian operation.  Computation of shift comes from
4507134cb07SRichard Henderson      *
4517134cb07SRichard Henderson      *     index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
4527134cb07SRichard Henderson      *     data<index*4+3:index*4> = tag
4537134cb07SRichard Henderson      *
4547134cb07SRichard Henderson      * Because of the alignment of ptr above, BS=6 has shift=0.
4557134cb07SRichard Henderson      * All memory operations are aligned.  Defer support for BS=2,
4567134cb07SRichard Henderson      * requiring insertion or extraction of a nibble, until we
4577134cb07SRichard Henderson      * support a cpu that requires it.
4585f716a82SRichard Henderson      */
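    /*
     * Worked example (illustrative): with gm_bs == 4 the block is 64
     * bytes and ptr was aligned down to a 64-byte boundary, so
     * ptr<7:4> is a multiple of 4 and shift below is 0, 16, 32 or 48,
     * placing the 16 loaded tag bits at their position in the result.
     */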
459851ec6ebSRichard Henderson     switch (gm_bs) {
4607134cb07SRichard Henderson     case 3:
4617134cb07SRichard Henderson         /* 32 bytes -> 2 tags -> 8 result bits */
4627134cb07SRichard Henderson         ret = *(uint8_t *)tag_mem;
4637134cb07SRichard Henderson         break;
4647134cb07SRichard Henderson     case 4:
4657134cb07SRichard Henderson         /* 64 bytes -> 4 tags -> 16 result bits */
4667134cb07SRichard Henderson         ret = cpu_to_le16(*(uint16_t *)tag_mem);
4677134cb07SRichard Henderson         break;
4687134cb07SRichard Henderson     case 5:
4697134cb07SRichard Henderson         /* 128 bytes -> 8 tags -> 32 result bits */
4707134cb07SRichard Henderson         ret = cpu_to_le32(*(uint32_t *)tag_mem);
4717134cb07SRichard Henderson         break;
472851ec6ebSRichard Henderson     case 6:
473851ec6ebSRichard Henderson         /* 256 bytes -> 16 tags -> 64 result bits */
4747134cb07SRichard Henderson         return cpu_to_le64(*(uint64_t *)tag_mem);
475851ec6ebSRichard Henderson     default:
4767134cb07SRichard Henderson         /*
4777134cb07SRichard Henderson          * CPU configured with unsupported/invalid gm blocksize.
4787134cb07SRichard Henderson          * This is detected early in arm_cpu_realizefn.
4797134cb07SRichard Henderson          */
480851ec6ebSRichard Henderson         g_assert_not_reached();
481851ec6ebSRichard Henderson     }
4827134cb07SRichard Henderson     shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
4837134cb07SRichard Henderson     return ret << shift;
4845f716a82SRichard Henderson }
4855f716a82SRichard Henderson 
4865f716a82SRichard Henderson void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
4875f716a82SRichard Henderson {
488b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
4895f716a82SRichard Henderson     uintptr_t ra = GETPC();
490851ec6ebSRichard Henderson     int gm_bs = env_archcpu(env)->gm_blocksize;
491851ec6ebSRichard Henderson     int gm_bs_bytes = 4 << gm_bs;
4925f716a82SRichard Henderson     void *tag_mem;
4937134cb07SRichard Henderson     int shift;
4945f716a82SRichard Henderson 
495851ec6ebSRichard Henderson     ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
4965f716a82SRichard Henderson 
4975f716a82SRichard Henderson     /* Trap if accessing an invalid page.  */
4985f716a82SRichard Henderson     tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
4990b5ad31dSPeter Maydell                                  gm_bs_bytes, MMU_DATA_LOAD, ra);
5005f716a82SRichard Henderson 
5015f716a82SRichard Henderson     /*
5025f716a82SRichard Henderson      * Tag store only happens if the page supports tags,
5035f716a82SRichard Henderson      * and if the OS has enabled access to the tags.
5045f716a82SRichard Henderson      */
5055f716a82SRichard Henderson     if (!tag_mem) {
5065f716a82SRichard Henderson         return;
5075f716a82SRichard Henderson     }
5085f716a82SRichard Henderson 
5097134cb07SRichard Henderson     /* See LDGM for comments on BS and on shift.  */
5107134cb07SRichard Henderson     shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
5117134cb07SRichard Henderson     val >>= shift;
512851ec6ebSRichard Henderson     switch (gm_bs) {
5137134cb07SRichard Henderson     case 3:
5147134cb07SRichard Henderson         /* 32 bytes -> 2 tags -> 8 result bits */
5157134cb07SRichard Henderson         *(uint8_t *)tag_mem = val;
5167134cb07SRichard Henderson         break;
5177134cb07SRichard Henderson     case 4:
5187134cb07SRichard Henderson         /* 64 bytes -> 4 tags -> 16 result bits */
5197134cb07SRichard Henderson         *(uint16_t *)tag_mem = cpu_to_le16(val);
5207134cb07SRichard Henderson         break;
5217134cb07SRichard Henderson     case 5:
5227134cb07SRichard Henderson         /* 128 bytes -> 8 tags -> 32 result bits */
5237134cb07SRichard Henderson         *(uint32_t *)tag_mem = cpu_to_le32(val);
5247134cb07SRichard Henderson         break;
525851ec6ebSRichard Henderson     case 6:
5267134cb07SRichard Henderson         /* 256 bytes -> 16 tags -> 64 result bits */
5277134cb07SRichard Henderson         *(uint64_t *)tag_mem = cpu_to_le64(val);
528851ec6ebSRichard Henderson         break;
529851ec6ebSRichard Henderson     default:
530851ec6ebSRichard Henderson         /* cpu configured with unsupported gm blocksize. */
531851ec6ebSRichard Henderson         g_assert_not_reached();
532851ec6ebSRichard Henderson     }
5335f716a82SRichard Henderson }
5345f716a82SRichard Henderson 
5355f716a82SRichard Henderson void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
5365f716a82SRichard Henderson {
5375f716a82SRichard Henderson     uintptr_t ra = GETPC();
538b7770d72SRichard Henderson     int mmu_idx = arm_env_mmu_index(env);
5395f716a82SRichard Henderson     int log2_dcz_bytes, log2_tag_bytes;
5405f716a82SRichard Henderson     intptr_t dcz_bytes, tag_bytes;
5415f716a82SRichard Henderson     uint8_t *mem;
5425f716a82SRichard Henderson 
5435f716a82SRichard Henderson     /*
5445f716a82SRichard Henderson      * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
5455f716a82SRichard Henderson      * i.e. 32 bytes, which is an unreasonably small dcz anyway,
5465f716a82SRichard Henderson      * to make sure that we can access one complete tag byte here.
5475f716a82SRichard Henderson      */
5485f716a82SRichard Henderson     log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
5495f716a82SRichard Henderson     log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
5505f716a82SRichard Henderson     dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
5515f716a82SRichard Henderson     tag_bytes = (intptr_t)1 << log2_tag_bytes;
5525f716a82SRichard Henderson     ptr &= -dcz_bytes;
5535f716a82SRichard Henderson 
5545f716a82SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
5550b5ad31dSPeter Maydell                              MMU_DATA_STORE, ra);
5565f716a82SRichard Henderson     if (mem) {
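        /*
         * Replicate the 4-bit tag into both nibbles of a tag byte and
         * fill every tag byte covering the zeroed block.
         */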
5575f716a82SRichard Henderson         int tag_pair = (val & 0xf) * 0x11;
5585f716a82SRichard Henderson         memset(mem, tag_pair, tag_bytes);
5595f716a82SRichard Henderson     }
5605f716a82SRichard Henderson }
5610a405be2SRichard Henderson 
56286f0d4c7SPeter Collingbourne static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
56386f0d4c7SPeter Collingbourne                                 uint64_t dirty_ptr, uintptr_t ra)
56486f0d4c7SPeter Collingbourne {
56586f0d4c7SPeter Collingbourne     int is_write, syn;
56686f0d4c7SPeter Collingbourne 
56786f0d4c7SPeter Collingbourne     env->exception.vaddress = dirty_ptr;
56886f0d4c7SPeter Collingbourne 
56986f0d4c7SPeter Collingbourne     is_write = FIELD_EX32(desc, MTEDESC, WRITE);
57086f0d4c7SPeter Collingbourne     syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
57186f0d4c7SPeter Collingbourne                                 0x11);
57286f0d4c7SPeter Collingbourne     raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
57386f0d4c7SPeter Collingbourne     g_assert_not_reached();
57486f0d4c7SPeter Collingbourne }
57586f0d4c7SPeter Collingbourne 
57686f0d4c7SPeter Collingbourne static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
57786f0d4c7SPeter Collingbourne                                  uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
57886f0d4c7SPeter Collingbourne {
57986f0d4c7SPeter Collingbourne     int select;
58086f0d4c7SPeter Collingbourne 
58186f0d4c7SPeter Collingbourne     if (regime_has_2_ranges(arm_mmu_idx)) {
58286f0d4c7SPeter Collingbourne         select = extract64(dirty_ptr, 55, 1);
58386f0d4c7SPeter Collingbourne     } else {
58486f0d4c7SPeter Collingbourne         select = 0;
58586f0d4c7SPeter Collingbourne     }
58686f0d4c7SPeter Collingbourne     env->cp15.tfsr_el[el] |= 1 << select;
58786f0d4c7SPeter Collingbourne #ifdef CONFIG_USER_ONLY
58886f0d4c7SPeter Collingbourne     /*
58986f0d4c7SPeter Collingbourne      * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
59086f0d4c7SPeter Collingbourne      * which then sends a SIGSEGV when the thread is next scheduled.
59186f0d4c7SPeter Collingbourne      * This cpu will return to the main loop at the end of the TB,
59286f0d4c7SPeter Collingbourne      * which is rather sooner than "normal".  But the alternative
59386f0d4c7SPeter Collingbourne      * is waiting until the next syscall.
59486f0d4c7SPeter Collingbourne      */
59586f0d4c7SPeter Collingbourne     qemu_cpu_kick(env_cpu(env));
59686f0d4c7SPeter Collingbourne #endif
59786f0d4c7SPeter Collingbourne }
59886f0d4c7SPeter Collingbourne 
5992e34ff45SRichard Henderson /* Record a tag check failure.  */
60081639989SPeter Maydell void mte_check_fail(CPUARMState *env, uint32_t desc,
6012e34ff45SRichard Henderson                     uint64_t dirty_ptr, uintptr_t ra)
6022e34ff45SRichard Henderson {
603dbf8c321SRichard Henderson     int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
6042e34ff45SRichard Henderson     ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
60586f0d4c7SPeter Collingbourne     int el, reg_el, tcf;
6062e34ff45SRichard Henderson     uint64_t sctlr;
6072e34ff45SRichard Henderson 
6082e34ff45SRichard Henderson     reg_el = regime_el(env, arm_mmu_idx);
6092e34ff45SRichard Henderson     sctlr = env->cp15.sctlr_el[reg_el];
6102e34ff45SRichard Henderson 
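    /*
     * Accesses from EL0 are controlled by SCTLR_ELx.TCF0 (bits [39:38]);
     * all other regimes use SCTLR_ELx.TCF (bits [41:40]).
     */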
6112d928adfSPeter Collingbourne     switch (arm_mmu_idx) {
6122d928adfSPeter Collingbourne     case ARMMMUIdx_E10_0:
6132d928adfSPeter Collingbourne     case ARMMMUIdx_E20_0:
6142d928adfSPeter Collingbourne         el = 0;
6152e34ff45SRichard Henderson         tcf = extract64(sctlr, 38, 2);
6162d928adfSPeter Collingbourne         break;
6172d928adfSPeter Collingbourne     default:
6182d928adfSPeter Collingbourne         el = reg_el;
6192e34ff45SRichard Henderson         tcf = extract64(sctlr, 40, 2);
6202e34ff45SRichard Henderson     }
6212e34ff45SRichard Henderson 
6222e34ff45SRichard Henderson     switch (tcf) {
6232e34ff45SRichard Henderson     case 1:
6245bf100c3SJamie Iles         /* Tag check fail causes a synchronous exception. */
62586f0d4c7SPeter Collingbourne         mte_sync_check_fail(env, desc, dirty_ptr, ra);
62686f0d4c7SPeter Collingbourne         break;
6272e34ff45SRichard Henderson 
6282e34ff45SRichard Henderson     case 0:
6292e34ff45SRichard Henderson         /*
6302e34ff45SRichard Henderson          * Tag check fail does not affect the PE.
6312e34ff45SRichard Henderson          * We eliminate this case by not setting MTE_ACTIVE
6322e34ff45SRichard Henderson          * in tb_flags, so that we never make this runtime call.
6332e34ff45SRichard Henderson          */
6342e34ff45SRichard Henderson         g_assert_not_reached();
6352e34ff45SRichard Henderson 
6362e34ff45SRichard Henderson     case 2:
6372e34ff45SRichard Henderson         /* Tag check fail causes asynchronous flag set.  */
63886f0d4c7SPeter Collingbourne         mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
6392e34ff45SRichard Henderson         break;
6402e34ff45SRichard Henderson 
64186f0d4c7SPeter Collingbourne     case 3:
64286f0d4c7SPeter Collingbourne         /*
64386f0d4c7SPeter Collingbourne          * Tag check fail causes asynchronous flag set for stores, or
64486f0d4c7SPeter Collingbourne          * a synchronous exception for loads.
64586f0d4c7SPeter Collingbourne          */
64686f0d4c7SPeter Collingbourne         if (FIELD_EX32(desc, MTEDESC, WRITE)) {
64786f0d4c7SPeter Collingbourne             mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
64886f0d4c7SPeter Collingbourne         } else {
64986f0d4c7SPeter Collingbourne             mte_sync_check_fail(env, desc, dirty_ptr, ra);
65086f0d4c7SPeter Collingbourne         }
6512e34ff45SRichard Henderson         break;
6522e34ff45SRichard Henderson     }
6532e34ff45SRichard Henderson }
6542e34ff45SRichard Henderson 
6555add8248SRichard Henderson /**
6565add8248SRichard Henderson  * checkN:
6575add8248SRichard Henderson  * @mem: tag memory to test
6585add8248SRichard Henderson  * @odd: true to begin testing at the odd nibble
6595add8248SRichard Henderson  * @cmp: the tag to compare against
6605add8248SRichard Henderson  * @count: number of tags to test
6615add8248SRichard Henderson  *
6625add8248SRichard Henderson  * Return the number of successful tests.
6635add8248SRichard Henderson  * Thus a return value < @count indicates a failure.
6645add8248SRichard Henderson  *
6655add8248SRichard Henderson  * A note about sizes: count is expected to be small.
6665add8248SRichard Henderson  *
6675add8248SRichard Henderson  * The most common use will be LDP/STP of two integer registers,
6685add8248SRichard Henderson  * which means 16 bytes of memory touching at most 2 tags, but
6695add8248SRichard Henderson  * often the access is aligned and thus just 1 tag.
6705add8248SRichard Henderson  *
6715add8248SRichard Henderson  * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
6725add8248SRichard Henderson  * touching at most 5 tags.  SVE LDR/STR (vector) with the default
6735add8248SRichard Henderson  * vector length is also 64 bytes; the maximum architectural length
6745add8248SRichard Henderson  * is 256 bytes touching at most 9 tags.
6755add8248SRichard Henderson  *
6765add8248SRichard Henderson  * The loop below uses 7 logical operations and 1 memory operation
6775add8248SRichard Henderson  * per tag pair.  An implementation that loads an aligned word and
6785add8248SRichard Henderson  * uses masking to ignore adjacent tags requires 18 logical operations
6795add8248SRichard Henderson  * and thus does not begin to pay off until 6 tags, which, according
6805add8248SRichard Henderson  * to the survey above, is unlikely to be common.
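 *
 * As an illustration (values invented here): with cmp == 3, so the
 * replicated compare value is 0x33, mem[0] == 0x33 and mem[1] == 0x53,
 * a call with odd == 0 and count == 4 matches three tags and stops at
 * the mismatching odd nibble of mem[1], returning 3.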
6815add8248SRichard Henderson  */
6825add8248SRichard Henderson static int checkN(uint8_t *mem, int odd, int cmp, int count)
6835add8248SRichard Henderson {
6845add8248SRichard Henderson     int n = 0, diff;
6855add8248SRichard Henderson 
6865add8248SRichard Henderson     /* Replicate the test tag and compare.  */
6875add8248SRichard Henderson     cmp *= 0x11;
6885add8248SRichard Henderson     diff = *mem++ ^ cmp;
6895add8248SRichard Henderson 
6905add8248SRichard Henderson     if (odd) {
6915add8248SRichard Henderson         goto start_odd;
6925add8248SRichard Henderson     }
6935add8248SRichard Henderson 
6945add8248SRichard Henderson     while (1) {
6955add8248SRichard Henderson         /* Test even tag. */
6965add8248SRichard Henderson         if (unlikely((diff) & 0x0f)) {
6975add8248SRichard Henderson             break;
6985add8248SRichard Henderson         }
6995add8248SRichard Henderson         if (++n == count) {
7005add8248SRichard Henderson             break;
7015add8248SRichard Henderson         }
7025add8248SRichard Henderson 
7035add8248SRichard Henderson     start_odd:
7045add8248SRichard Henderson         /* Test odd tag. */
7055add8248SRichard Henderson         if (unlikely((diff) & 0xf0)) {
7065add8248SRichard Henderson             break;
7075add8248SRichard Henderson         }
7085add8248SRichard Henderson         if (++n == count) {
7095add8248SRichard Henderson             break;
7105add8248SRichard Henderson         }
7115add8248SRichard Henderson 
7125add8248SRichard Henderson         diff = *mem++ ^ cmp;
7135add8248SRichard Henderson     }
7145add8248SRichard Henderson     return n;
7155add8248SRichard Henderson }
7165add8248SRichard Henderson 
717f8c8a860SRichard Henderson /**
71869c51dc3SPeter Maydell  * checkNrev:
71969c51dc3SPeter Maydell  * @mem: tag memory to test
72069c51dc3SPeter Maydell  * @odd: true to begin testing at the odd nibble
72169c51dc3SPeter Maydell  * @cmp: the tag to compare against
72269c51dc3SPeter Maydell  * @count: number of tags to test
72369c51dc3SPeter Maydell  *
72469c51dc3SPeter Maydell  * Return the number of successful tests.
72569c51dc3SPeter Maydell  * Thus a return value < @count indicates a failure.
72669c51dc3SPeter Maydell  *
72769c51dc3SPeter Maydell  * This is like checkN, but it runs backwards, checking the
72869c51dc3SPeter Maydell  * tags starting with @tag and then the tags preceding it.
72969c51dc3SPeter Maydell  * This is needed by the backwards-memory-copying operations.
73069c51dc3SPeter Maydell  */
73169c51dc3SPeter Maydell static int checkNrev(uint8_t *mem, int odd, int cmp, int count)
73269c51dc3SPeter Maydell {
73369c51dc3SPeter Maydell     int n = 0, diff;
73469c51dc3SPeter Maydell 
73569c51dc3SPeter Maydell     /* Replicate the test tag and compare.  */
73669c51dc3SPeter Maydell     cmp *= 0x11;
73769c51dc3SPeter Maydell     diff = *mem-- ^ cmp;
73869c51dc3SPeter Maydell 
73969c51dc3SPeter Maydell     if (!odd) {
74069c51dc3SPeter Maydell         goto start_even;
74169c51dc3SPeter Maydell     }
74269c51dc3SPeter Maydell 
74369c51dc3SPeter Maydell     while (1) {
74469c51dc3SPeter Maydell         /* Test odd tag. */
74569c51dc3SPeter Maydell         if (unlikely((diff) & 0xf0)) {
74669c51dc3SPeter Maydell             break;
74769c51dc3SPeter Maydell         }
74869c51dc3SPeter Maydell         if (++n == count) {
74969c51dc3SPeter Maydell             break;
75069c51dc3SPeter Maydell         }
75169c51dc3SPeter Maydell 
75269c51dc3SPeter Maydell     start_even:
75369c51dc3SPeter Maydell         /* Test even tag. */
75469c51dc3SPeter Maydell         if (unlikely((diff) & 0x0f)) {
75569c51dc3SPeter Maydell             break;
75669c51dc3SPeter Maydell         }
75769c51dc3SPeter Maydell         if (++n == count) {
75869c51dc3SPeter Maydell             break;
75969c51dc3SPeter Maydell         }
76069c51dc3SPeter Maydell 
76169c51dc3SPeter Maydell         diff = *mem-- ^ cmp;
76269c51dc3SPeter Maydell     }
76369c51dc3SPeter Maydell     return n;
76469c51dc3SPeter Maydell }
76569c51dc3SPeter Maydell 
76669c51dc3SPeter Maydell /**
767f8c8a860SRichard Henderson  * mte_probe_int() - helper for mte_probe and mte_check
768f8c8a860SRichard Henderson  * @env: CPU environment
769f8c8a860SRichard Henderson  * @desc: MTEDESC descriptor
770f8c8a860SRichard Henderson  * @ptr: virtual address of the base of the access
771f8c8a860SRichard Henderson  * @fault: return virtual address of the first check failure
772f8c8a860SRichard Henderson  *
773f8c8a860SRichard Henderson  * Internal routine for both mte_probe and mte_check.
774f8c8a860SRichard Henderson  * Return zero on failure, filling in *fault.
775f8c8a860SRichard Henderson  * Return negative on trivial success for tbi disabled.
776f8c8a860SRichard Henderson  * Return positive on success with tbi enabled.
777f8c8a860SRichard Henderson  */
778f8c8a860SRichard Henderson static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
77928f32503SRichard Henderson                          uintptr_t ra, uint64_t *fault)
7805add8248SRichard Henderson {
7815add8248SRichard Henderson     int mmu_idx, ptr_tag, bit55;
78298f96050SRichard Henderson     uint64_t ptr_last, prev_page, next_page;
78398f96050SRichard Henderson     uint64_t tag_first, tag_last;
7840b5ad31dSPeter Maydell     uint32_t sizem1, tag_count, n, c;
7855add8248SRichard Henderson     uint8_t *mem1, *mem2;
7865add8248SRichard Henderson     MMUAccessType type;
7875add8248SRichard Henderson 
7885add8248SRichard Henderson     bit55 = extract64(ptr, 55, 1);
789f8c8a860SRichard Henderson     *fault = ptr;
7905add8248SRichard Henderson 
7915add8248SRichard Henderson     /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
7925add8248SRichard Henderson     if (unlikely(!tbi_check(desc, bit55))) {
793f8c8a860SRichard Henderson         return -1;
7945add8248SRichard Henderson     }
7955add8248SRichard Henderson 
7965add8248SRichard Henderson     ptr_tag = allocation_tag_from_addr(ptr);
7975add8248SRichard Henderson 
7985add8248SRichard Henderson     if (tcma_check(desc, bit55, ptr_tag)) {
799f8c8a860SRichard Henderson         return 1;
8005add8248SRichard Henderson     }
8015add8248SRichard Henderson 
8025add8248SRichard Henderson     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
8035add8248SRichard Henderson     type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
80428f32503SRichard Henderson     sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);
8055add8248SRichard Henderson 
80698f96050SRichard Henderson     /* Find the addr of the end of the access */
80728f32503SRichard Henderson     ptr_last = ptr + sizem1;
8085add8248SRichard Henderson 
8095add8248SRichard Henderson     /* Round the bounds to the tag granule, and compute the number of tags. */
8105add8248SRichard Henderson     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
81198f96050SRichard Henderson     tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
81298f96050SRichard Henderson     tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
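    /*
     * For instance (illustrative values): a 16-byte access at ptr
     * 0x1007 gives ptr_last 0x1016, tag_first 0x1000, tag_last 0x1010
     * and tag_count 2, as the access straddles two granules.
     */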
8135add8248SRichard Henderson 
8145add8248SRichard Henderson     /* Locate the page boundaries. */
8155add8248SRichard Henderson     prev_page = ptr & TARGET_PAGE_MASK;
8165add8248SRichard Henderson     next_page = prev_page + TARGET_PAGE_SIZE;
8175add8248SRichard Henderson 
818d3327a38SRichard Henderson     if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
8195add8248SRichard Henderson         /* Memory access stays on one page. */
82028f32503SRichard Henderson         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
8210b5ad31dSPeter Maydell                                   MMU_DATA_LOAD, ra);
8225add8248SRichard Henderson         if (!mem1) {
823f8c8a860SRichard Henderson             return 1;
8245add8248SRichard Henderson         }
8255add8248SRichard Henderson         /* Perform all of the comparisons. */
8265add8248SRichard Henderson         n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
8275add8248SRichard Henderson     } else {
8285add8248SRichard Henderson         /* Memory access crosses to next page. */
8295add8248SRichard Henderson         mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
8300b5ad31dSPeter Maydell                                   MMU_DATA_LOAD, ra);
8315add8248SRichard Henderson 
8325add8248SRichard Henderson         mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
83398f96050SRichard Henderson                                   ptr_last - next_page + 1,
8340b5ad31dSPeter Maydell                                   MMU_DATA_LOAD, ra);
8355add8248SRichard Henderson 
8365add8248SRichard Henderson         /*
8375add8248SRichard Henderson          * Perform all of the comparisons.
8385add8248SRichard Henderson          * Note the possible but unlikely case of the operation spanning
8395add8248SRichard Henderson          * two pages that do not both have tagging enabled.
8405add8248SRichard Henderson          */
8415add8248SRichard Henderson         n = c = (next_page - tag_first) / TAG_GRANULE;
8425add8248SRichard Henderson         if (mem1) {
8435add8248SRichard Henderson             n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
8445add8248SRichard Henderson         }
8455add8248SRichard Henderson         if (n == c) {
8465add8248SRichard Henderson             if (!mem2) {
847f8c8a860SRichard Henderson                 return 1;
8485add8248SRichard Henderson             }
8495add8248SRichard Henderson             n += checkN(mem2, 0, ptr_tag, tag_count - c);
8505add8248SRichard Henderson         }
8515add8248SRichard Henderson     }
8525add8248SRichard Henderson 
853f8c8a860SRichard Henderson     if (likely(n == tag_count)) {
854f8c8a860SRichard Henderson         return 1;
855f8c8a860SRichard Henderson     }
856f8c8a860SRichard Henderson 
8575add8248SRichard Henderson     /*
85898f96050SRichard Henderson      * If we failed, we know which granule.  For the first granule, the
85998f96050SRichard Henderson      * failure address is @ptr, the first byte accessed.  Otherwise the
86098f96050SRichard Henderson      * failure address is the first byte of the nth granule.
8615add8248SRichard Henderson      */
862f8c8a860SRichard Henderson     if (n > 0) {
863f8c8a860SRichard Henderson         *fault = tag_first + n * TAG_GRANULE;
864f8c8a860SRichard Henderson     }
865f8c8a860SRichard Henderson     return 0;
8665add8248SRichard Henderson }
8675add8248SRichard Henderson 
868bd47b61cSRichard Henderson uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
869f8c8a860SRichard Henderson {
870f8c8a860SRichard Henderson     uint64_t fault;
87128f32503SRichard Henderson     int ret = mte_probe_int(env, desc, ptr, ra, &fault);
872f8c8a860SRichard Henderson 
873f8c8a860SRichard Henderson     if (unlikely(ret == 0)) {
874f8c8a860SRichard Henderson         mte_check_fail(env, desc, fault, ra);
875f8c8a860SRichard Henderson     } else if (ret < 0) {
876f8c8a860SRichard Henderson         return ptr;
877f8c8a860SRichard Henderson     }
8785add8248SRichard Henderson     return useronly_clean_ptr(ptr);
8795add8248SRichard Henderson }
8805add8248SRichard Henderson 
881bd47b61cSRichard Henderson uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
88273ceeb00SRichard Henderson {
883523da6b9SRichard Henderson     /*
884523da6b9SRichard Henderson      * R_XCHFJ: Alignment check not caused by memory type is priority 1,
885523da6b9SRichard Henderson      * higher than any translation fault.  When MTE is disabled, tcg
886523da6b9SRichard Henderson      * performs the alignment check during the code generated for the
887523da6b9SRichard Henderson      * memory access.  With MTE enabled, we must check this here before
888523da6b9SRichard Henderson      * raising any translation fault in allocation_tag_mem.
889523da6b9SRichard Henderson      */
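    /*
     * MTEDESC.ALIGN holds log2 of the required alignment; e.g. a value
     * of 3 requires 8-byte alignment, i.e. an address mask of 0x7.
     */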
890523da6b9SRichard Henderson     unsigned align = FIELD_EX32(desc, MTEDESC, ALIGN);
891523da6b9SRichard Henderson     if (unlikely(align)) {
892523da6b9SRichard Henderson         align = (1u << align) - 1;
893523da6b9SRichard Henderson         if (unlikely(ptr & align)) {
894523da6b9SRichard Henderson             int idx = FIELD_EX32(desc, MTEDESC, MIDX);
895523da6b9SRichard Henderson             bool w = FIELD_EX32(desc, MTEDESC, WRITE);
896523da6b9SRichard Henderson             MMUAccessType type = w ? MMU_DATA_STORE : MMU_DATA_LOAD;
897523da6b9SRichard Henderson             arm_cpu_do_unaligned_access(env_cpu(env), ptr, type, idx, GETPC());
898523da6b9SRichard Henderson         }
899523da6b9SRichard Henderson     }
900523da6b9SRichard Henderson 
901bd47b61cSRichard Henderson     return mte_check(env, desc, ptr, GETPC());
9024a09a213SRichard Henderson }
9034a09a213SRichard Henderson 
9044a09a213SRichard Henderson /*
905d304d280SRichard Henderson  * No-fault version of mte_check, to be used by SVE for MemSingleNF.
9064a09a213SRichard Henderson  * Returns false if the access is Checked and the check failed.  This
9074a09a213SRichard Henderson  * is only intended to probe the tag -- the validity of the page must
9084a09a213SRichard Henderson  * be checked beforehand.
9094a09a213SRichard Henderson  */
910d304d280SRichard Henderson bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
9114a09a213SRichard Henderson {
9124a09a213SRichard Henderson     uint64_t fault;
91328f32503SRichard Henderson     int ret = mte_probe_int(env, desc, ptr, 0, &fault);
9144a09a213SRichard Henderson 
9154a09a213SRichard Henderson     return ret != 0;
9164a09a213SRichard Henderson }
9174a09a213SRichard Henderson 
91846dc1bc0SRichard Henderson /*
91946dc1bc0SRichard Henderson  * Perform an MTE checked access for DC_ZVA.
92046dc1bc0SRichard Henderson  */
92146dc1bc0SRichard Henderson uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
92246dc1bc0SRichard Henderson {
92346dc1bc0SRichard Henderson     uintptr_t ra = GETPC();
92446dc1bc0SRichard Henderson     int log2_dcz_bytes, log2_tag_bytes;
92546dc1bc0SRichard Henderson     int mmu_idx, bit55;
92646dc1bc0SRichard Henderson     intptr_t dcz_bytes, tag_bytes, i;
92746dc1bc0SRichard Henderson     void *mem;
92846dc1bc0SRichard Henderson     uint64_t ptr_tag, mem_tag, align_ptr;
92946dc1bc0SRichard Henderson 
93046dc1bc0SRichard Henderson     bit55 = extract64(ptr, 55, 1);
93146dc1bc0SRichard Henderson 
93246dc1bc0SRichard Henderson     /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
93346dc1bc0SRichard Henderson     if (unlikely(!tbi_check(desc, bit55))) {
93446dc1bc0SRichard Henderson         return ptr;
93546dc1bc0SRichard Henderson     }
93646dc1bc0SRichard Henderson 
93746dc1bc0SRichard Henderson     ptr_tag = allocation_tag_from_addr(ptr);
93846dc1bc0SRichard Henderson 
93946dc1bc0SRichard Henderson     if (tcma_check(desc, bit55, ptr_tag)) {
94046dc1bc0SRichard Henderson         goto done;
94146dc1bc0SRichard Henderson     }
94246dc1bc0SRichard Henderson 
94346dc1bc0SRichard Henderson     /*
94446dc1bc0SRichard Henderson      * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE + 1,
94546dc1bc0SRichard Henderson      * i.e. 32 bytes (an unreasonably small dcz in any case), so that we
94646dc1bc0SRichard Henderson      * can always access one complete tag byte here.
94746dc1bc0SRichard Henderson      */
94846dc1bc0SRichard Henderson     log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
94946dc1bc0SRichard Henderson     log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
95046dc1bc0SRichard Henderson     dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
95146dc1bc0SRichard Henderson     tag_bytes = (intptr_t)1 << log2_tag_bytes;
95246dc1bc0SRichard Henderson     align_ptr = ptr & -dcz_bytes;
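    /*
     * For example, with the common 64-byte ZVA block: log2_dcz_bytes = 6,
     * log2_tag_bytes = 1, so we read two tag bytes, covering the four
     * TAG_GRANULE (16-byte) chunks of the block.
     */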
95346dc1bc0SRichard Henderson 
95446dc1bc0SRichard Henderson     /*
95546dc1bc0SRichard Henderson      * Trap if accessing an invalid page.  DC_ZVA requires that we supply
95646dc1bc0SRichard Henderson      * the original pointer for an invalid page.  But watchpoints require
95746dc1bc0SRichard Henderson      * that we probe the actual space.  So do both.
95846dc1bc0SRichard Henderson      */
95946dc1bc0SRichard Henderson     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
96046dc1bc0SRichard Henderson     (void) probe_write(env, ptr, 1, mmu_idx, ra);
96146dc1bc0SRichard Henderson     mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
9620b5ad31dSPeter Maydell                              dcz_bytes, MMU_DATA_LOAD, ra);
96346dc1bc0SRichard Henderson     if (!mem) {
96446dc1bc0SRichard Henderson         goto done;
96546dc1bc0SRichard Henderson     }
96646dc1bc0SRichard Henderson 
96746dc1bc0SRichard Henderson     /*
96846dc1bc0SRichard Henderson      * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
96946dc1bc0SRichard Henderson      * it is quite easy to perform all of the comparisons at once without
97046dc1bc0SRichard Henderson      * any extra masking.
97146dc1bc0SRichard Henderson      *
97246dc1bc0SRichard Henderson      * The most common zva block size is 64; some of the thunderx cpus use
97346dc1bc0SRichard Henderson      * a block size of 128.  For user-only, aarch64_max_initfn will set the
97446dc1bc0SRichard Henderson      * block size to 512.  Fill out the other cases for future-proofing.
97546dc1bc0SRichard Henderson      *
97646dc1bc0SRichard Henderson      * In order to be able to find the first miscompare later, we want the
97746dc1bc0SRichard Henderson      * tag bytes to be in little-endian order.
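     *
     * Multiplying the 4-bit ptr_tag by 0x11, 0x1111, 0x11111111 or
     * 0x1111111111111111 replicates it into every nibble, matching the
     * layout of the tag bytes read from tag memory (one 4-bit tag per
     * TAG_GRANULE).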
97846dc1bc0SRichard Henderson      */
97946dc1bc0SRichard Henderson     switch (log2_tag_bytes) {
98046dc1bc0SRichard Henderson     case 0: /* zva_blocksize 32 */
98146dc1bc0SRichard Henderson         mem_tag = *(uint8_t *)mem;
98246dc1bc0SRichard Henderson         ptr_tag *= 0x11u;
98346dc1bc0SRichard Henderson         break;
98446dc1bc0SRichard Henderson     case 1: /* zva_blocksize 64 */
98546dc1bc0SRichard Henderson         mem_tag = cpu_to_le16(*(uint16_t *)mem);
98646dc1bc0SRichard Henderson         ptr_tag *= 0x1111u;
98746dc1bc0SRichard Henderson         break;
98846dc1bc0SRichard Henderson     case 2: /* zva_blocksize 128 */
98946dc1bc0SRichard Henderson         mem_tag = cpu_to_le32(*(uint32_t *)mem);
99046dc1bc0SRichard Henderson         ptr_tag *= 0x11111111u;
99146dc1bc0SRichard Henderson         break;
99246dc1bc0SRichard Henderson     case 3: /* zva_blocksize 256 */
99346dc1bc0SRichard Henderson         mem_tag = cpu_to_le64(*(uint64_t *)mem);
99446dc1bc0SRichard Henderson         ptr_tag *= 0x1111111111111111ull;
99546dc1bc0SRichard Henderson         break;
99646dc1bc0SRichard Henderson 
99746dc1bc0SRichard Henderson     default: /* zva_blocksize 512, 1024, 2048 */
99846dc1bc0SRichard Henderson         ptr_tag *= 0x1111111111111111ull;
99946dc1bc0SRichard Henderson         i = 0;
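        /* Each uint64_t of tag bytes covers 16 granules (one nibble each). */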
100046dc1bc0SRichard Henderson         do {
100146dc1bc0SRichard Henderson             mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
100246dc1bc0SRichard Henderson             if (unlikely(mem_tag != ptr_tag)) {
100346dc1bc0SRichard Henderson                 goto fail;
100446dc1bc0SRichard Henderson             }
100546dc1bc0SRichard Henderson             i += 8;
100646dc1bc0SRichard Henderson             align_ptr += 16 * TAG_GRANULE;
100746dc1bc0SRichard Henderson         } while (i < tag_bytes);
100846dc1bc0SRichard Henderson         goto done;
100946dc1bc0SRichard Henderson     }
101046dc1bc0SRichard Henderson 
101146dc1bc0SRichard Henderson     if (likely(mem_tag == ptr_tag)) {
101246dc1bc0SRichard Henderson         goto done;
101346dc1bc0SRichard Henderson     }
101446dc1bc0SRichard Henderson 
101546dc1bc0SRichard Henderson  fail:
101646dc1bc0SRichard Henderson     /* Locate the first nibble that differs. */
101746dc1bc0SRichard Henderson     i = ctz64(mem_tag ^ ptr_tag) >> 4;
1018dbf8c321SRichard Henderson     mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);
101946dc1bc0SRichard Henderson 
102046dc1bc0SRichard Henderson  done:
102146dc1bc0SRichard Henderson     return useronly_clean_ptr(ptr);
102246dc1bc0SRichard Henderson }
102381639989SPeter Maydell 
102481639989SPeter Maydell uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
102581639989SPeter Maydell                         uint32_t desc)
102681639989SPeter Maydell {
102781639989SPeter Maydell     int mmu_idx, tag_count;
102881639989SPeter Maydell     uint64_t ptr_tag, tag_first, tag_last;
102981639989SPeter Maydell     void *mem;
103081639989SPeter Maydell     bool w = FIELD_EX32(desc, MTEDESC, WRITE);
103181639989SPeter Maydell     uint32_t n;
103281639989SPeter Maydell 
103381639989SPeter Maydell     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
103481639989SPeter Maydell     /* True probe; this will never fault */
103581639989SPeter Maydell     mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
103681639989SPeter Maydell                                    w ? MMU_DATA_STORE : MMU_DATA_LOAD,
103781639989SPeter Maydell                                    size, MMU_DATA_LOAD, true, 0);
103881639989SPeter Maydell     if (!mem) {
103981639989SPeter Maydell         return size;
104081639989SPeter Maydell     }
104181639989SPeter Maydell 
104281639989SPeter Maydell     /*
104381639989SPeter Maydell      * TODO: checkN() is not designed for checks of the size we expect
104481639989SPeter Maydell      * for FEAT_MOPS operations, so we should implement this differently.
104581639989SPeter Maydell      * Maybe we should do something like
104681639989SPeter Maydell      *   if (region start and size are aligned nicely) {
104781639989SPeter Maydell      *      do direct loads of 64 tag bits at a time;
104881639989SPeter Maydell      *   } else {
104981639989SPeter Maydell      *      call checkN()
105081639989SPeter Maydell      *   }
105181639989SPeter Maydell      */
105281639989SPeter Maydell     /* Round the bounds to the tag granule, and compute the number of tags. */
105381639989SPeter Maydell     ptr_tag = allocation_tag_from_addr(ptr);
105481639989SPeter Maydell     tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
105581639989SPeter Maydell     tag_last = QEMU_ALIGN_DOWN(ptr + size - 1, TAG_GRANULE);
105681639989SPeter Maydell     tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
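    /*
     * tag_count includes the (possibly partial) granules containing the
     * first and last bytes of the region.
     */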
105781639989SPeter Maydell     n = checkN(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
105881639989SPeter Maydell     if (likely(n == tag_count)) {
105981639989SPeter Maydell         return size;
106081639989SPeter Maydell     }
106181639989SPeter Maydell 
106281639989SPeter Maydell     /*
106381639989SPeter Maydell      * Failure; for the first granule, it's at @ptr. Otherwise
106481639989SPeter Maydell      * it's at the first byte of the nth granule. Calculate how
106581639989SPeter Maydell      * many bytes we can access without hitting that failure.
106681639989SPeter Maydell      */
106781639989SPeter Maydell     if (n == 0) {
106881639989SPeter Maydell         return 0;
106981639989SPeter Maydell     } else {
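        /*
         * The first n granules, starting at tag_first, passed the check;
         * subtract ptr's offset into the first granule to convert that
         * into a byte count measured from ptr.
         */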
107081639989SPeter Maydell         return n * TAG_GRANULE - (ptr - tag_first);
107181639989SPeter Maydell     }
107281639989SPeter Maydell }
10736087df57SPeter Maydell 
107469c51dc3SPeter Maydell uint64_t mte_mops_probe_rev(CPUARMState *env, uint64_t ptr, uint64_t size,
107569c51dc3SPeter Maydell                             uint32_t desc)
107669c51dc3SPeter Maydell {
107769c51dc3SPeter Maydell     int mmu_idx, tag_count;
107869c51dc3SPeter Maydell     uint64_t ptr_tag, tag_first, tag_last;
107969c51dc3SPeter Maydell     void *mem;
108069c51dc3SPeter Maydell     bool w = FIELD_EX32(desc, MTEDESC, WRITE);
108169c51dc3SPeter Maydell     uint32_t n;
108269c51dc3SPeter Maydell 
108369c51dc3SPeter Maydell     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
10844d044472SPeter Maydell     /*
10854d044472SPeter Maydell      * True probe; this will never fault. Note that our caller passes
10864d044472SPeter Maydell      * us a pointer to the end of the region, but allocation_tag_mem_probe()
10874d044472SPeter Maydell      * wants a pointer to the start. Because we know we don't span a page
10884d044472SPeter Maydell      * boundary and that allocation_tag_mem_probe() doesn't otherwise care
10894d044472SPeter Maydell      * about the size, pass in a size of 1 byte. This is simpler than
10904d044472SPeter Maydell      * adjusting the ptr to point to the start of the region and then having
10914d044472SPeter Maydell      * to adjust the returned 'mem' to get the end of the tag memory.
10924d044472SPeter Maydell      */
109369c51dc3SPeter Maydell     mem = allocation_tag_mem_probe(env, mmu_idx, ptr,
109469c51dc3SPeter Maydell                                    w ? MMU_DATA_STORE : MMU_DATA_LOAD,
10954d044472SPeter Maydell                                    1, MMU_DATA_LOAD, true, 0);
109669c51dc3SPeter Maydell     if (!mem) {
109769c51dc3SPeter Maydell         return size;
109869c51dc3SPeter Maydell     }
109969c51dc3SPeter Maydell 
110069c51dc3SPeter Maydell     /*
110169c51dc3SPeter Maydell      * TODO: checkNrev() is not designed for checks of the size we expect
110269c51dc3SPeter Maydell      * for FEAT_MOPS operations, so we should implement this differently.
110369c51dc3SPeter Maydell      * Maybe we should do something like
110469c51dc3SPeter Maydell      *   if (region start and size are aligned nicely) {
110569c51dc3SPeter Maydell      *      do direct loads of 64 tag bits at a time;
110669c51dc3SPeter Maydell      *   } else {
110769c51dc3SPeter Maydell      *      call checkNrev()
110869c51dc3SPeter Maydell      *   }
110969c51dc3SPeter Maydell      */
111069c51dc3SPeter Maydell     /* Round the bounds to the tag granule, and compute the number of tags. */
111169c51dc3SPeter Maydell     ptr_tag = allocation_tag_from_addr(ptr);
111269c51dc3SPeter Maydell     tag_first = QEMU_ALIGN_DOWN(ptr - (size - 1), TAG_GRANULE);
111369c51dc3SPeter Maydell     tag_last = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
111469c51dc3SPeter Maydell     tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;
111569c51dc3SPeter Maydell     n = checkNrev(mem, ptr & TAG_GRANULE, ptr_tag, tag_count);
111669c51dc3SPeter Maydell     if (likely(n == tag_count)) {
111769c51dc3SPeter Maydell         return size;
111869c51dc3SPeter Maydell     }
111969c51dc3SPeter Maydell 
112069c51dc3SPeter Maydell     /*
112169c51dc3SPeter Maydell      * Failure; for the first granule, it's at @ptr. Otherwise
112269c51dc3SPeter Maydell      * it's at the last byte of the nth granule. Calculate how
112369c51dc3SPeter Maydell      * many bytes we can access without hitting that failure.
112469c51dc3SPeter Maydell      */
112569c51dc3SPeter Maydell     if (n == 0) {
112669c51dc3SPeter Maydell         return 0;
112769c51dc3SPeter Maydell     } else {
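        /*
         * The last n granules, ending with the one at tag_last, passed the
         * check: (ptr + 1) - tag_last usable bytes in the granule containing
         * ptr, plus n - 1 whole granules below it.
         */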
112869c51dc3SPeter Maydell         return (n - 1) * TAG_GRANULE + ((ptr + 1) - tag_last);
112969c51dc3SPeter Maydell     }
113069c51dc3SPeter Maydell }
113169c51dc3SPeter Maydell 
11326087df57SPeter Maydell void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
11336087df57SPeter Maydell                        uint32_t desc)
11346087df57SPeter Maydell {
11356087df57SPeter Maydell     int mmu_idx, tag_count;
11366087df57SPeter Maydell     uint64_t ptr_tag;
11376087df57SPeter Maydell     void *mem;
11386087df57SPeter Maydell 
11396087df57SPeter Maydell     if (!desc) {
11406087df57SPeter Maydell         /* Tags not actually enabled */
11416087df57SPeter Maydell         return;
11426087df57SPeter Maydell     }
11436087df57SPeter Maydell 
11446087df57SPeter Maydell     mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
11456087df57SPeter Maydell     /* True probe: this will never fault */
11466087df57SPeter Maydell     mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
11476087df57SPeter Maydell                                    MMU_DATA_STORE, true, 0);
11486087df57SPeter Maydell     if (!mem) {
11496087df57SPeter Maydell         return;
11506087df57SPeter Maydell     }
11516087df57SPeter Maydell 
11526087df57SPeter Maydell     /*
11536087df57SPeter Maydell      * We know that ptr and size are both TAG_GRANULE aligned; store
11546087df57SPeter Maydell      * the tag from the pointer value into the tag memory.
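     *
     * Each tag byte covers two granules (one 4-bit tag per nibble), so the
     * bulk of the region is filled with memset(); a leading or trailing
     * granule that shares its tag byte with memory outside the region is
     * updated one nibble at a time with store_tag1_parallel().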
11556087df57SPeter Maydell      */
11566087df57SPeter Maydell     ptr_tag = allocation_tag_from_addr(ptr);
11576087df57SPeter Maydell     tag_count = size / TAG_GRANULE;
11586087df57SPeter Maydell     if (ptr & TAG_GRANULE) {
11596087df57SPeter Maydell         /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
11606087df57SPeter Maydell         store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
11616087df57SPeter Maydell         mem++;
11626087df57SPeter Maydell         tag_count--;
11636087df57SPeter Maydell     }
11646087df57SPeter Maydell     memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
11656087df57SPeter Maydell     if (tag_count & 1) {
11666087df57SPeter Maydell         /* Final trailing unaligned nibble */
11676087df57SPeter Maydell         mem += tag_count / 2;
11686087df57SPeter Maydell         store_tag1_parallel(0, mem, ptr_tag);
11696087df57SPeter Maydell     }
11706087df57SPeter Maydell }
1171