/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"

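/*
 * Choose a tag value: step forward from @tag by @offset non-excluded
 * tag values (or, for @offset == 0, to the first non-excluded value at
 * or after @tag).  @exclude has one bit per tag value; if all sixteen
 * tags are excluded the result is 0.
 */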
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access,
                              !(flags & PAGE_VALID), ra);
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    CPUTLBEntryFull *full;
    MemTxAttrs attrs;
    int in_page, flags;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe.  The page is expected to be
     * valid.  Indicate to probe_access_full no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_full(env, ptr, 0, ptr_access, ptr_mmu_idx,
                              ra == 0, &host, &full, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (full->pte_attrs != 0xf0) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * Remember these values across the second lookup below,
     * which may invalidate this pointer via tlb resize.
     */
    ptr_paddr = full->phys_addr | (ptr & ~TARGET_PAGE_MASK);
    attrs = full->attrs;
    full = NULL;

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        flags |= probe_access_full(env, ptr + in_page, 0, ptr_access,
                                   ptr_mmu_idx, ra == 0, &host, &full, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size, attrs, wp, ra);
    }

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE, attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

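/*
 * IRG: generate a random allocation tag and insert it into @rn.
 * Tags excluded via @rm or GCR_EL1.Exclude are never chosen; the
 * deterministic LFSR state lives in RGSR_EL1 (previous tag and seed).
 */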
uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

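/*
 * ADDG/SUBG: adjust @ptr by @offset and replace its allocation tag with
 * one chosen @tag_offset steps beyond the current tag, skipping any tag
 * excluded by GCR_EL1.Exclude.
 */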
uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

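/* Load the tag nibble for @ptr from the tag byte at @mem. */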
static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

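/* LDG: load the allocation tag for @ptr and insert it into @xt. */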
uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

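/*
 * Raise an alignment fault unless @ptr is aligned to the tag granule
 * (16 bytes); does not return in the fault case.
 */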
static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

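/*
 * STG stub: check alignment and probe the address for writability,
 * without updating tag memory.
 */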
void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

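/*
 * ST2G: store the tag to two consecutive granules.  When the pair is
 * aligned to 2 * TAG_GRANULE both nibbles live in one tag byte and are
 * written together; otherwise the two nibbles are in adjacent bytes,
 * possibly on different pages.
 */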
static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

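/*
 * ST2G stub: check alignment and probe both granules for writability,
 * splitting the probe when the pair crosses a page boundary, without
 * updating tag memory.
 */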
void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

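/*
 * The block of memory operated on by LDGM/STGM: 4 << GMID_EL1.BS bytes.
 * With GMID_EL1_BS fixed at 6 (asserted below), that is 256 bytes, i.e.
 * 16 tag granules, whose tags fill exactly one 64-bit register.
 */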
#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

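/*
 * Report a tag check fault synchronously: record the faulting address
 * and raise a data abort with fault status code 0x11 (synchronous tag
 * check fault).
 */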
static void mte_sync_check_fail(CPUARMState *env, uint32_t desc,
                                uint64_t dirty_ptr, uintptr_t ra)
{
    int is_write, syn;

    env->exception.vaddress = dirty_ptr;

    is_write = FIELD_EX32(desc, MTEDESC, WRITE);
    syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0, is_write,
                                0x11);
    raise_exception_ra(env, EXCP_DATA_ABORT, syn, exception_target_el(env), ra);
    g_assert_not_reached();
}

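/*
 * Record a tag check fault asynchronously, by accumulating it into
 * TFSR_ELx.TF0/TF1; the bit is selected by bit 55 of the faulting
 * address when the translation regime has two address ranges.
 */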
static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
                                 uintptr_t ra, ARMMMUIdx arm_mmu_idx, int el)
{
    int select;

    if (regime_has_2_ranges(arm_mmu_idx)) {
        select = extract64(dirty_ptr, 55, 1);
    } else {
        select = 0;
    }
    env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
    /*
     * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
     * which then sends a SIGSEGV when the thread is next scheduled.
     * This cpu will return to the main loop at the end of the TB,
     * which is rather sooner than "normal".  But the alternative
     * is waiting until the next syscall.
     */
    qemu_cpu_kick(env_cpu(env));
#endif
}

/* Record a tag check failure.  */
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /* Tag check fail causes a synchronous exception. */
        mte_sync_check_fail(env, desc, dirty_ptr, ra);
        break;

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        break;

    case 3:
        /*
         * Tag check fail causes asynchronous flag set for stores, or
         * a synchronous exception for loads.
         */
        if (FIELD_EX32(desc, MTEDESC, WRITE)) {
            mte_async_check_fail(env, dirty_ptr, ra, arm_mmu_idx, el);
        } else {
            mte_sync_check_fail(env, desc, dirty_ptr, ra);
        }
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at the odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

    start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe and mte_check
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: the return address for exception handling
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for both mte_probe and mte_check.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t sizem1, tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;
    sizem1 = FIELD_EX32(desc, MTEDESC, SIZEM1);

    /* Find the addr of the end of the access */
    ptr_last = ptr + sizem1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page < TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, sizem1 + 1,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

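/*
 * Perform the tag check described by @desc for the access at @ptr,
 * reporting any mismatch via mte_check_fail, and return the pointer
 * to use for the memory access (cleaned for user-only emulation).
 */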
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, ra, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    int ret = mte_probe_int(env, desc, ptr, 0, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}
909