/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CPUTLB_H
#define CPUTLB_H

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/vaddr.h"

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
#endif

#ifndef CONFIG_USER_ONLY
void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
#endif

/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);
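/*
 * Example (illustrative sketch only): a target's tlb_fill hook might
 * install a page after a successful walk roughly as below.  The variable
 * names and the chosen permission/attribute values are hypothetical; any
 * further CPUTLBEntryFull fields a target relies on must also be filled.
 *
 *     CPUTLBEntryFull full = {
 *         .phys_addr = paddr & TARGET_PAGE_MASK,
 *         .attrs = MEMTXATTRS_UNSPECIFIED,
 *         .prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *         .lg_page_size = TARGET_PAGE_BITS,
 *     };
 *     tlb_set_page_full(cs, mmu_idx, addr & TARGET_PAGE_MASK, &full);
 */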

/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
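/*
 * Example (illustrative sketch only): installing a mapping with explicit
 * transaction attributes, here marking the access as secure.  Variable
 * names and the chosen attribute are hypothetical.
 *
 *     MemTxAttrs attrs = { .secure = 1 };
 *     tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK, attrs,
 *                             PAGE_READ | PAGE_WRITE, mmu_idx,
 *                             TARGET_PAGE_SIZE);
 */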

/**
 * tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);

#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
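/*
 * Example (illustrative sketch only): a helper emulating a guest
 * "invalidate one page" instruction might need no more than the call
 * below; the helper name is invented, and env_cpu() stands in for the
 * target's usual way of getting at its CPUState.
 *
 *     void helper_invalidate_page(CPUArchState *env, vaddr addr)
 *     {
 *         tlb_flush_page(env_cpu(env), addr);
 *     }
 */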

/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
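/*
 * Example (illustrative sketch only): emulating a broadcast TLB
 * invalidate, where the guest expects every CPU to have dropped the
 * translation once the operation completes.  Names are hypothetical.
 *
 *     tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
 */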

/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
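/*
 * Example (illustrative sketch only): a target typically flushes the
 * whole TLB when state that affects every translation changes, for
 * instance in a hypothetical helper that rewrites an MMU control
 * register ("mmu_ctrl" is an invented field):
 *
 *     void helper_set_mmu_ctrl(CPUArchState *env, uint64_t val)
 *     {
 *         env->mmu_ctrl = val;
 *         tlb_flush(env_cpu(env));
 *     }
 */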

/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs, for all MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);

/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
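/*
 * Example (illustrative sketch only): flushing one page from two MMU
 * indexes at once.  MMU_KERNEL_IDX and MMU_USER_IDX are hypothetical
 * per-target index names; @idxmap is a bitmask of such indexes.
 *
 *     tlb_flush_page_by_mmuidx(cs, addr,
 *                              (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX));
 */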

/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);

/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
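/*
 * Example (illustrative sketch only): dropping every entry belonging to
 * a single translation regime, say when a hypothetical per-regime page
 * table base register is rewritten.  The index name is invented.
 *
 *     tlb_flush_by_mmuidx(env_cpu(env), 1 << MMU_KERNEL_IDX);
 */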

/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);

/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but comparing only the low
 * @bits of each virtual address.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);
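/*
 * Example (illustrative sketch only): a target that ignores the top
 * eight bits of a 64-bit virtual address (tag bits) could flush with
 * only the low 56 bits considered significant.  The index name is
 * hypothetical.
 *
 *     tlb_flush_page_bits_by_mmuidx(cs, addr, 1 << MMU_USER_IDX, 56);
 */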

/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits);

/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);
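/*
 * Example (illustrative sketch only): flushing a 2MiB guest region from
 * one MMU index, with all address bits significant.  The region size and
 * index name are hypothetical.
 *
 *     tlb_flush_range_by_mmuidx(cs, addr & TARGET_PAGE_MASK,
 *                               2 * MiB, 1 << MMU_KERNEL_IDX,
 *                               TARGET_LONG_BITS);
 */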

/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);
#else
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
#endif /* CPUTLB_H */