/*
 * TranslationBlock internal declarations (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_TB_INTERNAL_TARGET_H
#define ACCEL_TCG_TB_INTERNAL_TARGET_H

#include "exec/cpu-all.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"

/*
 * The true return address will often point to a host insn that is part of
 * the next translated guest insn. Adjust the address backward to point to
 * the middle of the call insn. Subtracting one would do the job except for
 * several compressed mode architectures (arm, mips) which set the low bit
 * to indicate the compressed mode; subtracting two works around that. It
 * is also the case that there are no host isas that contain a call insn
 * smaller than 4 bytes, so we don't worry about special-casing this.
 */
#define GETPC_ADJ 2

#ifdef CONFIG_SOFTMMU

/*
 * Bounds for the dynamically-resized softmmu TLB: the number of entries is
 * 2**bits, kept between the MIN and MAX limits below, starting at DEFAULT.
 */
#define CPU_TLB_DYN_MIN_BITS 6
#define CPU_TLB_DYN_DEFAULT_BITS 8

# if HOST_LONG_BITS == 32
/* Make sure we do not require a double-word shift for the TLB load */
# define CPU_TLB_DYN_MAX_BITS (32 - TARGET_PAGE_BITS)
# else /* HOST_LONG_BITS == 64 */
/*
 * Assuming TARGET_PAGE_BITS==12, with 2**22 entries we can cover 2**(22+12) ==
 * 2**34 == 16G of address space. This is roughly what one would expect a
 * TLB to cover in a modern (as of 2018) x86_64 CPU. For instance, Intel
 * Skylake's Level-2 STLB has 16 1G entries.
 * Also, make sure we do not size the TLB past the guest's address space.
 */
# ifdef TARGET_PAGE_BITS_VARY
/* TARGET_PAGE_BITS is not a compile-time constant here, so plain MIN. */
# define CPU_TLB_DYN_MAX_BITS \
    MIN(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
# else
/* Constant-foldable variant, usable in constant expressions. */
# define CPU_TLB_DYN_MAX_BITS \
    MIN_CONST(22, TARGET_VIRT_ADDR_SPACE_BITS - TARGET_PAGE_BITS)
# endif
# endif

#endif /* CONFIG_SOFTMMU */

/*
 * Lock the page containing the given address against writes while a TB
 * that executes from it exists.  Defined in the TCG core; for user-only
 * this presumably maps to page_protect() (see comment below) — confirm
 * against the implementation.
 */
void tb_lock_page0(tb_page_addr_t);

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    /* The second page is protected exactly like the first; p0 is unused. */
    tb_lock_page0(p1);
}

/* No-ops: unprotection is deferred to the write fault, per the note above. */
static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
/* Softmmu: real implementations live in the TCG core. */
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif

#ifdef CONFIG_SOFTMMU
/*
 * Invalidate any TBs overlapping [ram_addr, ram_addr + size); retaddr is
 * the host return address of the triggering store, used to unwind the
 * guest state of the currently-executing TB — NOTE(review): semantics
 * inferred from the parameter names; confirm against the definition.
 */
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

/*
 * Invalidate TBs on the page containing addr, unwinding via pc if it lies
 * within the current TB; the bool result's exact meaning is defined at the
 * implementation — confirm before relying on it.
 */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

#endif