/*
 * ARM v8.5-MemTag Operations
 *
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/ram_addr.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"


static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
    if (exclude == 0xffff) {
        return 0;
    }
    if (offset == 0) {
        while (exclude & (1 << tag)) {
            tag = (tag + 1) & 15;
        }
    } else {
        do {
            do {
                tag = (tag + 1) & 15;
            } while (exclude & (1 << tag));
        } while (--offset > 0);
    }
    return tag;
}

/**
 * allocation_tag_mem:
 * @env: the cpu environment
 * @ptr_mmu_idx: the addressing regime to use for the virtual address
 * @ptr: the virtual address for which to look up tag memory
 * @ptr_access: the access to use for the virtual address
 * @ptr_size: the number of bytes in the normal memory access
 * @tag_access: the access to use for the tag memory
 * @tag_size: the number of bytes in the tag memory access
 * @ra: the return address for exception handling
 *
 * Our tag memory is formatted as a sequence of little-endian nibbles.
 * That is, the byte at (addr >> (LOG2_TAG_GRANULE + 1)) contains two
 * tags, with the tag at [3:0] for the lower addr and the tag at [7:4]
 * for the higher addr.
 *
 * Here, resolve the physical address from the virtual address, and return
 * a pointer to the corresponding tag byte.  Exit with exception if the
 * virtual address is not accessible for @ptr_access.
 *
 * The @ptr_size and @tag_size values may not have an obvious relation
 * due to the alignment of @ptr, and the number of tag checks required.
 *
 * If there is no tag storage corresponding to @ptr, return NULL.
 */
static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx,
                                   uint64_t ptr, MMUAccessType ptr_access,
                                   int ptr_size, MMUAccessType tag_access,
                                   int tag_size, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
    uint64_t clean_ptr = useronly_clean_ptr(ptr);
    int flags = page_get_flags(clean_ptr);
    uint8_t *tags;
    uintptr_t index;

    if (!(flags & (ptr_access == MMU_DATA_STORE ? PAGE_WRITE_ORG : PAGE_READ))) {
        /* SIGSEGV */
        arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access,
                         ptr_mmu_idx, false, ra);
        g_assert_not_reached();
    }

    /* Require both MAP_ANON and PROT_MTE for the page. */
    if (!(flags & PAGE_ANON) || !(flags & PAGE_MTE)) {
        return NULL;
    }

    tags = page_get_target_data(clean_ptr);
    if (tags == NULL) {
        size_t alloc_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
        tags = page_alloc_target_data(clean_ptr, alloc_size);
        assert(tags != NULL);
    }

    index = extract32(ptr, LOG2_TAG_GRANULE + 1,
                      TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
    return tags + index;
#else
    uintptr_t index;
    CPUIOTLBEntry *iotlbentry;
    int in_page, flags;
    ram_addr_t ptr_ra;
    hwaddr ptr_paddr, tag_paddr, xlat;
    MemoryRegion *mr;
    ARMASIdx tag_asi;
    AddressSpace *tag_as;
    void *host;

    /*
     * Probe the first byte of the virtual address.  This raises an
     * exception for inaccessible pages, and resolves the virtual address
     * into the softmmu tlb.
     *
     * When RA == 0, this is for mte_probe1.  The page is expected to be
     * valid.  Indicate to probe_access_flags no-fault, then assert that
     * we received a valid page.
     */
    flags = probe_access_flags(env, ptr, ptr_access, ptr_mmu_idx,
                               ra == 0, &host, ra);
    assert(!(flags & TLB_INVALID_MASK));

    /*
     * Find the iotlbentry for ptr.  This *must* be present in the TLB
     * because we just found the mapping.
     * TODO: Perhaps there should be a cputlb helper that returns a
     * matching tlb entry + iotlb entry.
     */
    index = tlb_index(env, ptr_mmu_idx, ptr);
# ifdef CONFIG_DEBUG_TCG
    {
        CPUTLBEntry *entry = tlb_entry(env, ptr_mmu_idx, ptr);
        target_ulong comparator = (ptr_access == MMU_DATA_LOAD
                                   ? entry->addr_read
                                   : tlb_addr_write(entry));
        g_assert(tlb_hit(comparator, ptr));
    }
# endif
    iotlbentry = &env_tlb(env)->d[ptr_mmu_idx].iotlb[index];

    /* If the virtual page MemAttr != Tagged, access unchecked. */
    if (!arm_tlb_mte_tagged(&iotlbentry->attrs)) {
        return NULL;
    }

    /*
     * If not backed by host ram, there is no tag storage: access unchecked.
     * This is probably a guest os bug though, so log it.
     */
    if (unlikely(flags & TLB_MMIO)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Page @ 0x%" PRIx64 " indicates Tagged Normal memory "
                      "but is not backed by host ram\n", ptr);
        return NULL;
    }

    /*
     * The Normal memory access can extend to the next page.  E.g. a single
     * 8-byte access to the last byte of a page will check only the last
     * tag on the first page.
     * Any page access exception has priority over tag check exception.
     */
    in_page = -(ptr | TARGET_PAGE_MASK);
    if (unlikely(ptr_size > in_page)) {
        void *ignore;
        flags |= probe_access_flags(env, ptr + in_page, ptr_access,
                                    ptr_mmu_idx, ra == 0, &ignore, ra);
        assert(!(flags & TLB_INVALID_MASK));
    }

    /* Any debug exception has priority over a tag check exception. */
    if (unlikely(flags & TLB_WATCHPOINT)) {
        int wp = ptr_access == MMU_DATA_LOAD ? BP_MEM_READ : BP_MEM_WRITE;
        assert(ra != 0);
        cpu_check_watchpoint(env_cpu(env), ptr, ptr_size,
                             iotlbentry->attrs, wp, ra);
    }

    /*
     * Find the physical address within the normal mem space.
     * The memory region lookup must succeed because TLB_MMIO was
     * not set in the cputlb lookup above.
     */
    mr = memory_region_from_host(host, &ptr_ra);
    tcg_debug_assert(mr != NULL);
    tcg_debug_assert(memory_region_is_ram(mr));
    ptr_paddr = ptr_ra;
    do {
        ptr_paddr += mr->addr;
        mr = mr->container;
    } while (mr);

    /* Convert to the physical address in tag space.  */
    tag_paddr = ptr_paddr >> (LOG2_TAG_GRANULE + 1);

    /* Look up the address in tag space. */
    tag_asi = iotlbentry->attrs.secure ? ARMASIdx_TagS : ARMASIdx_TagNS;
    tag_as = cpu_get_address_space(env_cpu(env), tag_asi);
    mr = address_space_translate(tag_as, tag_paddr, &xlat, NULL,
                                 tag_access == MMU_DATA_STORE,
                                 iotlbentry->attrs);

    /*
     * Note that @mr will never be NULL.  If there is nothing in the address
     * space at @tag_paddr, the translation will return the unallocated memory
     * region.  For our purposes, the result must be ram.
     */
    if (unlikely(!memory_region_is_ram(mr))) {
        /* ??? Failure is a board configuration error. */
        qemu_log_mask(LOG_UNIMP,
                      "Tag Memory @ 0x%" HWADDR_PRIx " not found for "
                      "Normal Memory @ 0x%" HWADDR_PRIx "\n",
                      tag_paddr, ptr_paddr);
        return NULL;
    }

    /*
     * Ensure the tag memory is dirty on write, for migration.
     * Tag memory can never contain code or display memory (vga).
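     * Hence DIRTY_MEMORY_MIGRATION is the only dirty bit that needs setting.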
     */
    if (tag_access == MMU_DATA_STORE) {
        ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
        cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
    }

    return memory_region_get_ram_ptr(mr) + xlat;
#endif
}

uint64_t HELPER(irg)(CPUARMState *env, uint64_t rn, uint64_t rm)
{
    uint16_t exclude = extract32(rm | env->cp15.gcr_el1, 0, 16);
    int rrnd = extract32(env->cp15.gcr_el1, 16, 1);
    int start = extract32(env->cp15.rgsr_el1, 0, 4);
    int seed = extract32(env->cp15.rgsr_el1, 8, 16);
    int offset, i, rtag;

    /*
     * Our IMPDEF choice for GCR_EL1.RRND==1 is to continue to use the
     * deterministic algorithm.  Except that with RRND==1 the kernel is
     * not required to have set RGSR_EL1.SEED != 0, which is required for
     * the deterministic algorithm to function.  So we force a non-zero
     * SEED for that case.
     */
    if (unlikely(seed == 0) && rrnd) {
        do {
            Error *err = NULL;
            uint16_t two;

            if (qemu_guest_getrandom(&two, sizeof(two), &err) < 0) {
                /*
                 * Failed, for unknown reasons in the crypto subsystem.
                 * Best we can do is log the reason and use a constant seed.
                 */
                qemu_log_mask(LOG_UNIMP, "IRG: Crypto failure: %s\n",
                              error_get_pretty(err));
                error_free(err);
                two = 1;
            }
            seed = two;
        } while (seed == 0);
    }

    /* RandomTag */
    for (i = offset = 0; i < 4; ++i) {
        /* NextRandomTagBit */
        int top = (extract32(seed, 5, 1) ^ extract32(seed, 3, 1) ^
                   extract32(seed, 2, 1) ^ extract32(seed, 0, 1));
        seed = (top << 15) | (seed >> 1);
        offset |= top << i;
    }
    rtag = choose_nonexcluded_tag(start, offset, exclude);
    env->cp15.rgsr_el1 = rtag | (seed << 8);

    return address_with_allocation_tag(rn, rtag);
}

uint64_t HELPER(addsubg)(CPUARMState *env, uint64_t ptr,
                         int32_t offset, uint32_t tag_offset)
{
    int start_tag = allocation_tag_from_addr(ptr);
    uint16_t exclude = extract32(env->cp15.gcr_el1, 0, 16);
    int rtag = choose_nonexcluded_tag(start_tag, tag_offset, exclude);

    return address_with_allocation_tag(ptr + offset, rtag);
}

static int load_tag1(uint64_t ptr, uint8_t *mem)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    return extract32(*mem, ofs, 4);
}

uint64_t HELPER(ldg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;
    int rtag = 0;

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD, 1,
                             MMU_DATA_LOAD, 1, GETPC());

    /* Load if page supports tags. */
    if (mem) {
        rtag = load_tag1(ptr, mem);
    }

    return address_with_allocation_tag(xt, rtag);
}

static void check_tag_aligned(CPUARMState *env, uint64_t ptr, uintptr_t ra)
{
    if (unlikely(!QEMU_IS_ALIGNED(ptr, TAG_GRANULE))) {
        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
                                    cpu_mmu_index(env, false), ra);
        g_assert_not_reached();
    }
}

/* For use in a non-parallel context, store to the given nibble.  */
static void store_tag1(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    *mem = deposit32(*mem, ofs, 4, tag);
}

/* For use in a parallel context, atomically store to the given nibble.  */
static void store_tag1_parallel(uint64_t ptr, uint8_t *mem, int tag)
{
    int ofs = extract32(ptr, LOG2_TAG_GRANULE, 1) * 4;
    uint8_t old = qatomic_read(mem);

    while (1) {
        uint8_t new = deposit32(old, ofs, 4, tag);
        uint8_t cmp = qatomic_cmpxchg(mem, old, new);
        if (likely(cmp == old)) {
            return;
        }
        old = cmp;
    }
}

typedef void stg_store1(uint64_t, uint8_t *, int);

static inline void do_stg(CPUARMState *env, uint64_t ptr, uint64_t xt,
                          uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uint8_t *mem;

    check_tag_aligned(env, ptr, ra);

    /* Trap if accessing an invalid page.  */
    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, TAG_GRANULE,
                             MMU_DATA_STORE, 1, ra);

    /* Store if page supports tags. */
    if (mem) {
        store1(ptr, mem, allocation_tag_from_addr(xt));
    }
}

void HELPER(stg)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(stg_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_stg(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(stg_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();

    check_tag_aligned(env, ptr, ra);
    probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
}

static inline void do_st2g(CPUARMState *env, uint64_t ptr, uint64_t xt,
                           uintptr_t ra, stg_store1 store1)
{
    int mmu_idx = cpu_mmu_index(env, false);
    int tag = allocation_tag_from_addr(xt);
    uint8_t *mem1, *mem2;

    check_tag_aligned(env, ptr, ra);

    /*
     * Trap if accessing an invalid page(s).
     * This takes priority over !allocation_tag_access_enabled.
     */
    if (ptr & TAG_GRANULE) {
        /* Two stores unaligned mod TAG_GRANULE*2 -- modify two bytes. */
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        mem2 = allocation_tag_mem(env, mmu_idx, ptr + TAG_GRANULE,
                                  MMU_DATA_STORE, TAG_GRANULE,
                                  MMU_DATA_STORE, 1, ra);

        /* Store if page(s) support tags. */
        if (mem1) {
            store1(TAG_GRANULE, mem1, tag);
        }
        if (mem2) {
            store1(0, mem2, tag);
        }
    } else {
        /* Two stores aligned mod TAG_GRANULE*2 -- modify one byte. */
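        /*
         * Both nibbles of that byte receive the same tag, so a plain
         * byte store (which is naturally atomic) suffices here even
         * for the parallel helper.
         */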
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                  2 * TAG_GRANULE, MMU_DATA_STORE, 1, ra);
        if (mem1) {
            tag |= tag << 4;
            qatomic_set(mem1, tag);
        }
    }
}

void HELPER(st2g)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1);
}

void HELPER(st2g_parallel)(CPUARMState *env, uint64_t ptr, uint64_t xt)
{
    do_st2g(env, ptr, xt, GETPC(), store_tag1_parallel);
}

void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    int in_page = -(ptr | TARGET_PAGE_MASK);

    check_tag_aligned(env, ptr, ra);

    if (likely(in_page >= 2 * TAG_GRANULE)) {
        probe_write(env, ptr, 2 * TAG_GRANULE, mmu_idx, ra);
    } else {
        probe_write(env, ptr, TAG_GRANULE, mmu_idx, ra);
        probe_write(env, ptr + TAG_GRANULE, TAG_GRANULE, mmu_idx, ra);
    }
}

#define LDGM_STGM_SIZE  (4 << GMID_EL1_BS)

uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /* The tag is squashed to zero if the page does not support tags.  */
    if (!tag_mem) {
        return 0;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are loading 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
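     * With GMID_EL1.BS == 6 the block is 4 << 6 == 256 bytes, i.e. 16 tag
     * granules, so each of the 16 result nibbles holds the tag of one
     * granule, with the lowest address in bits [3:0].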
     */
    return ldq_le_p(tag_mem);
}

void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    int mmu_idx = cpu_mmu_index(env, false);
    uintptr_t ra = GETPC();
    void *tag_mem;

    ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);

    /* Trap if accessing an invalid page.  */
    tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
                                 LDGM_STGM_SIZE, MMU_DATA_LOAD,
                                 LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);

    /*
     * Tag store only happens if the page supports tags,
     * and if the OS has enabled access to the tags.
     */
    if (!tag_mem) {
        return;
    }

    QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
    /*
     * We are storing 64-bits worth of tags.  The ordering of elements
     * within the word corresponds to a 64-bit little-endian operation.
     */
    stq_le_p(tag_mem, val);
}

void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
    uintptr_t ra = GETPC();
    int mmu_idx = cpu_mmu_index(env, false);
    int log2_dcz_bytes, log2_tag_bytes;
    intptr_t dcz_bytes, tag_bytes;
    uint8_t *mem;

    /*
     * In arm_cpu_realizefn, we assert that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway,
     * to make sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    ptr &= -dcz_bytes;

    mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE, dcz_bytes,
                             MMU_DATA_STORE, tag_bytes, ra);
    if (mem) {
        int tag_pair = (val & 0xf) * 0x11;
        memset(mem, tag_pair, tag_bytes);
    }
}

/* Record a tag check failure.  */
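/*
 * The response is governed by SCTLR_ELx.TCF (TCF0 for EL0 accesses):
 * 0 ignores the failure and never reaches this helper, 1 raises a
 * synchronous data abort, 2 accumulates the fault into TFSR_ELx, and
 * 3 is reserved.
 */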
static void mte_check_fail(CPUARMState *env, uint32_t desc,
                           uint64_t dirty_ptr, uintptr_t ra)
{
    int mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    ARMMMUIdx arm_mmu_idx = core_to_aa64_mmu_idx(mmu_idx);
    int el, reg_el, tcf, select, is_write, syn;
    uint64_t sctlr;

    reg_el = regime_el(env, arm_mmu_idx);
    sctlr = env->cp15.sctlr_el[reg_el];

    switch (arm_mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E20_0:
        el = 0;
        tcf = extract64(sctlr, 38, 2);
        break;
    default:
        el = reg_el;
        tcf = extract64(sctlr, 40, 2);
    }

    switch (tcf) {
    case 1:
        /*
         * Tag check fail causes a synchronous exception.
         *
         * In restore_state_to_opc, we set the exception syndrome
         * for the load or store operation.  Unwind first so we
         * may overwrite that with the syndrome for the tag check.
         */
        cpu_restore_state(env_cpu(env), ra, true);
        env->exception.vaddress = dirty_ptr;

        is_write = FIELD_EX32(desc, MTEDESC, WRITE);
        syn = syn_data_abort_no_iss(arm_current_el(env) != 0, 0, 0, 0, 0,
                                    is_write, 0x11);
        raise_exception(env, EXCP_DATA_ABORT, syn, exception_target_el(env));
        /* noreturn, but fall through to the assert anyway */

    case 0:
        /*
         * Tag check fail does not affect the PE.
         * We eliminate this case by not setting MTE_ACTIVE
         * in tb_flags, so that we never make this runtime call.
         */
        g_assert_not_reached();

    case 2:
        /* Tag check fail causes asynchronous flag set.  */
        if (regime_has_2_ranges(arm_mmu_idx)) {
            select = extract64(dirty_ptr, 55, 1);
        } else {
            select = 0;
        }
        env->cp15.tfsr_el[el] |= 1 << select;
#ifdef CONFIG_USER_ONLY
        /*
         * Stand in for a timer irq, setting _TIF_MTE_ASYNC_FAULT,
         * which then sends a SIGSEGV when the thread is next scheduled.
         * This cpu will return to the main loop at the end of the TB,
         * which is rather sooner than "normal".  But the alternative
         * is waiting until the next syscall.
         */
        qemu_cpu_kick(env_cpu(env));
#endif
        break;

    default:
        /* Case 3: Reserved. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Tag check failure with SCTLR_EL%d.TCF%s "
                      "set to reserved value %d\n",
                      reg_el, el ? "" : "0", tcf);
        break;
    }
}

/**
 * checkN:
 * @mem: tag memory to test
 * @odd: true to begin testing at tags at odd nibble
 * @cmp: the tag to compare against
 * @count: number of tags to test
 *
 * Return the number of successful tests.
 * Thus a return value < @count indicates a failure.
 *
 * A note about sizes: count is expected to be small.
 *
 * The most common use will be LDP/STP of two integer registers,
 * which means 16 bytes of memory touching at most 2 tags, but
 * often the access is aligned and thus just 1 tag.
 *
 * Using AdvSIMD LD/ST (multiple), one can access 64 bytes of memory,
 * touching at most 5 tags.  SVE LDR/STR (vector) with the default
 * vector length is also 64 bytes; the maximum architectural length
 * is 256 bytes touching at most 9 tags.
 *
 * The loop below uses 7 logical operations and 1 memory operation
 * per tag pair.  An implementation that loads an aligned word and
 * uses masking to ignore adjacent tags requires 18 logical operations
 * and thus does not begin to pay off until 6 tags.
 * Which, according to the survey above, is unlikely to be common.
 */
static int checkN(uint8_t *mem, int odd, int cmp, int count)
{
    int n = 0, diff;

    /* Replicate the test tag and compare.  */
    cmp *= 0x11;
    diff = *mem++ ^ cmp;

    if (odd) {
        goto start_odd;
    }

    while (1) {
        /* Test even tag. */
        if (unlikely((diff) & 0x0f)) {
            break;
        }
        if (++n == count) {
            break;
        }

 start_odd:
        /* Test odd tag. */
        if (unlikely((diff) & 0xf0)) {
            break;
        }
        if (++n == count) {
            break;
        }

        diff = *mem++ ^ cmp;
    }
    return n;
}

/**
 * mte_probe_int() - helper for mte_probe1 and the mte_check* routines
 * @env: CPU environment
 * @desc: MTEDESC descriptor
 * @ptr: virtual address of the base of the access
 * @ra: return address for exception handling, or 0 for a no-fault probe
 * @total: number of bytes in the access
 * @fault: return virtual address of the first check failure
 *
 * Internal routine for mte_probe1, mte_check1 and mte_checkN.
 * Return zero on failure, filling in *fault.
 * Return negative on trivial success for tbi disabled.
 * Return positive on success with tbi enabled.
 */
static int mte_probe_int(CPUARMState *env, uint32_t desc, uint64_t ptr,
                         uintptr_t ra, uint32_t total, uint64_t *fault)
{
    int mmu_idx, ptr_tag, bit55;
    uint64_t ptr_last, prev_page, next_page;
    uint64_t tag_first, tag_last;
    uint64_t tag_byte_first, tag_byte_last;
    uint32_t tag_count, tag_size, n, c;
    uint8_t *mem1, *mem2;
    MMUAccessType type;

    bit55 = extract64(ptr, 55, 1);
    *fault = ptr;

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return -1;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        return 1;
    }

    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    type = FIELD_EX32(desc, MTEDESC, WRITE) ? MMU_DATA_STORE : MMU_DATA_LOAD;

    /* Find the addr of the end of the access */
    ptr_last = ptr + total - 1;

    /* Round the bounds to the tag granule, and compute the number of tags. */
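    /*
     * E.g. a 16-byte access whose address ends in 0xf8 covers bytes
     * 0xf8..0x107: it straddles the granules at 0xf0 and 0x100, so
     * tag_count is 2 even though the access is only granule-sized.
     */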
    tag_first = QEMU_ALIGN_DOWN(ptr, TAG_GRANULE);
    tag_last = QEMU_ALIGN_DOWN(ptr_last, TAG_GRANULE);
    tag_count = ((tag_last - tag_first) / TAG_GRANULE) + 1;

    /* Round the bounds to twice the tag granule, and compute the bytes. */
    tag_byte_first = QEMU_ALIGN_DOWN(ptr, 2 * TAG_GRANULE);
    tag_byte_last = QEMU_ALIGN_DOWN(ptr_last, 2 * TAG_GRANULE);

    /* Locate the page boundaries. */
    prev_page = ptr & TARGET_PAGE_MASK;
    next_page = prev_page + TARGET_PAGE_SIZE;

    if (likely(tag_last - prev_page <= TARGET_PAGE_SIZE)) {
        /* Memory access stays on one page. */
        tag_size = ((tag_byte_last - tag_byte_first) / (2 * TAG_GRANULE)) + 1;
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, total,
                                  MMU_DATA_LOAD, tag_size, ra);
        if (!mem1) {
            return 1;
        }
        /* Perform all of the comparisons. */
        n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, tag_count);
    } else {
        /* Memory access crosses to next page. */
        tag_size = (next_page - tag_byte_first) / (2 * TAG_GRANULE);
        mem1 = allocation_tag_mem(env, mmu_idx, ptr, type, next_page - ptr,
                                  MMU_DATA_LOAD, tag_size, ra);

        tag_size = ((tag_byte_last - next_page) / (2 * TAG_GRANULE)) + 1;
        mem2 = allocation_tag_mem(env, mmu_idx, next_page, type,
                                  ptr_last - next_page + 1,
                                  MMU_DATA_LOAD, tag_size, ra);

        /*
         * Perform all of the comparisons.
         * Note the possible but unlikely case of the operation spanning
         * two pages that do not both have tagging enabled.
         */
        n = c = (next_page - tag_first) / TAG_GRANULE;
        if (mem1) {
            n = checkN(mem1, ptr & TAG_GRANULE, ptr_tag, c);
        }
        if (n == c) {
            if (!mem2) {
                return 1;
            }
            n += checkN(mem2, 0, ptr_tag, tag_count - c);
        }
    }

    if (likely(n == tag_count)) {
        return 1;
    }

    /*
     * If we failed, we know which granule.  For the first granule, the
     * failure address is @ptr, the first byte accessed.  Otherwise the
     * failure address is the first byte of the nth granule.
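     * For n == 0, *fault already holds @ptr from the top of the routine,
     * which is why only the n > 0 case needs adjusting here.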
     */
    if (n > 0) {
        *fault = tag_first + n * TAG_GRANULE;
    }
    return 0;
}

uint64_t mte_checkN(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    uint32_t total = FIELD_EX32(desc, MTEDESC, TSIZE);
    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_checkN)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_checkN(env, desc, ptr, GETPC());
}

uint64_t mte_check1(CPUARMState *env, uint32_t desc,
                    uint64_t ptr, uintptr_t ra)
{
    uint64_t fault;
    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
    int ret = mte_probe_int(env, desc, ptr, ra, total, &fault);

    if (unlikely(ret == 0)) {
        mte_check_fail(env, desc, fault, ra);
    } else if (ret < 0) {
        return ptr;
    }
    return useronly_clean_ptr(ptr);
}

uint64_t HELPER(mte_check1)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    return mte_check1(env, desc, ptr, GETPC());
}

/*
 * No-fault version of mte_check1, to be used by SVE for MemSingleNF.
 * Returns false if the access is Checked and the check failed.  This
 * is only intended to probe the tag -- the validity of the page must
 * be checked beforehand.
 */
bool mte_probe1(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uint64_t fault;
    uint32_t total = FIELD_EX32(desc, MTEDESC, ESIZE);
    int ret = mte_probe_int(env, desc, ptr, 0, total, &fault);

    return ret != 0;
}

/*
 * Perform an MTE checked access for DC_ZVA.
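 * The block to be zeroed is aligned to the DC ZVA block size, so every
 * granule it covers is compared against the pointer's logical tag, and
 * the first mismatching granule is reported via mte_check_fail.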
 */
uint64_t HELPER(mte_check_zva)(CPUARMState *env, uint32_t desc, uint64_t ptr)
{
    uintptr_t ra = GETPC();
    int log2_dcz_bytes, log2_tag_bytes;
    int mmu_idx, bit55;
    intptr_t dcz_bytes, tag_bytes, i;
    void *mem;
    uint64_t ptr_tag, mem_tag, align_ptr;

    bit55 = extract64(ptr, 55, 1);

    /* If TBI is disabled, the access is unchecked, and ptr is not dirty. */
    if (unlikely(!tbi_check(desc, bit55))) {
        return ptr;
    }

    ptr_tag = allocation_tag_from_addr(ptr);

    if (tcma_check(desc, bit55, ptr_tag)) {
        goto done;
    }

    /*
     * In arm_cpu_realizefn, we asserted that dcz > LOG2_TAG_GRANULE+1,
     * i.e. 32 bytes, which is an unreasonably small dcz anyway, to make
     * sure that we can access one complete tag byte here.
     */
    log2_dcz_bytes = env_archcpu(env)->dcz_blocksize + 2;
    log2_tag_bytes = log2_dcz_bytes - (LOG2_TAG_GRANULE + 1);
    dcz_bytes = (intptr_t)1 << log2_dcz_bytes;
    tag_bytes = (intptr_t)1 << log2_tag_bytes;
    align_ptr = ptr & -dcz_bytes;

    /*
     * Trap if accessing an invalid page.  DC_ZVA requires that we supply
     * the original pointer for an invalid page.  But watchpoints require
     * that we probe the actual space.  So do both.
     */
    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
    (void) probe_write(env, ptr, 1, mmu_idx, ra);
    mem = allocation_tag_mem(env, mmu_idx, align_ptr, MMU_DATA_STORE,
                             dcz_bytes, MMU_DATA_LOAD, tag_bytes, ra);
    if (!mem) {
        goto done;
    }

    /*
     * Unlike the reasoning for checkN, DC_ZVA is always aligned, and thus
     * it is quite easy to perform all of the comparisons at once without
     * any extra masking.
     *
     * The most common zva block size is 64; some of the thunderx cpus use
     * a block size of 128.  For user-only, aarch64_max_initfn will set the
     * block size to 512.  Fill out the other cases for future-proofing.
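     * (With a 64-byte block, log2_tag_bytes == 1, so the whole block is
     * covered by a single 16-bit load of tag memory.)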
     *
     * In order to be able to find the first miscompare later, we want the
     * tag bytes to be in little-endian order.
     */
    switch (log2_tag_bytes) {
    case 0: /* zva_blocksize 32 */
        mem_tag = *(uint8_t *)mem;
        ptr_tag *= 0x11u;
        break;
    case 1: /* zva_blocksize 64 */
        mem_tag = cpu_to_le16(*(uint16_t *)mem);
        ptr_tag *= 0x1111u;
        break;
    case 2: /* zva_blocksize 128 */
        mem_tag = cpu_to_le32(*(uint32_t *)mem);
        ptr_tag *= 0x11111111u;
        break;
    case 3: /* zva_blocksize 256 */
        mem_tag = cpu_to_le64(*(uint64_t *)mem);
        ptr_tag *= 0x1111111111111111ull;
        break;

    default: /* zva_blocksize 512, 1024, 2048 */
        ptr_tag *= 0x1111111111111111ull;
        i = 0;
        do {
            mem_tag = cpu_to_le64(*(uint64_t *)(mem + i));
            if (unlikely(mem_tag != ptr_tag)) {
                goto fail;
            }
            i += 8;
            align_ptr += 16 * TAG_GRANULE;
        } while (i < tag_bytes);
        goto done;
    }

    if (likely(mem_tag == ptr_tag)) {
        goto done;
    }

 fail:
    /* Locate the first nibble that differs. */
    i = ctz64(mem_tag ^ ptr_tag) >> 4;
    mte_check_fail(env, desc, align_ptr + i * TAG_GRANULE, ra);

 done:
    return useronly_clean_ptr(ptr);
}