/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMOP_H
#define MEMOP_H

#include "qemu/host-utils.h"

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_128   = 4,
    MO_256   = 5,
    MO_512   = 6,
    MO_1024  = 7,
    MO_SIZE  = 0x07,   /* Mask for the above.  */

    MO_SIGN  = 0x08,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 0x10,   /* Host reverse endian.  */
#if HOST_BIG_ENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef COMPILING_PER_TARGET
#if TARGET_BIG_ENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif

    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     *
     * Some architectures (e.g. ARMv8) require an address that is aligned
     * to a size larger than the size of the memory access.
     * Some architectures (e.g. SPARCv9) require an address that is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of the access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes).
     */
    MO_ASHIFT = 5,
    MO_AMASK = 0x7 << MO_ASHIFT,
    MO_UNALN    = 0,
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
    MO_ALIGN    = MO_AMASK,

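    /*
     * Illustrative examples, not definitions from this header: a 4-byte
     * little-endian load requiring natural alignment would be described
     * as (MO_LE | MO_32 | MO_ALIGN), while a 16-byte access requiring
     * only 8-byte alignment would be (MO_128 | MO_ALIGN_8).
     */
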
    /*
     * MO_ATOM_* describes the atomicity requirements of the operation:
     * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
     *    is aligned; if unaligned there is no atomicity.
     * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
     *    be a pair of half-sized operations which are packed together
     *    for convenience, with single-copy atomicity on each half if
     *    the half is aligned.
     *    This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
     * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
     *    is unaligned, so long as it does not cross a 16-byte boundary;
     *    if it crosses a 16-byte boundary there is no atomicity.
     *    This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
     * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic
     *    if it does not cross a 16-byte boundary; otherwise it devolves
     *    to a pair of half-sized MO_ATOM_WITHIN16 operations.
     *    Depending on alignment, one or both will be single-copy atomic.
     *    This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
     * MO_ATOM_SUBALIGN: the operation is single-copy atomic by parts,
     *    as determined by the alignment.  E.g. if an 8-byte value is
     *    accessed at an address which is 0 mod 8, then the whole 8-byte
     *    access is single-copy atomic; otherwise, if it is accessed at
     *    0 mod 4, then each 4-byte subobject is single-copy atomic;
     *    otherwise, if it is accessed at 0 mod 2, then the four 2-byte
     *    subobjects are single-copy atomic.
     *    This is the atomicity e.g. of IBM Power.
     * MO_ATOM_NONE: the operation has no atomicity requirements.
     *
     * Note that the default (i.e. 0) value requires single-copy
     * atomicity to the size of the operation, if aligned.  This retains
     * the behaviour from before this field was introduced.
     */
    MO_ATOM_SHIFT         = 8,
    MO_ATOM_IFALIGN       = 0 << MO_ATOM_SHIFT,
    MO_ATOM_IFALIGN_PAIR  = 1 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16      = 2 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
    MO_ATOM_SUBALIGN      = 4 << MO_ATOM_SHIFT,
    MO_ATOM_NONE          = 5 << MO_ATOM_SHIFT,
    MO_ATOM_MASK          = 7 << MO_ATOM_SHIFT,

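    /*
     * Illustrative example, not a definition from this header: an Arm
     * FEAT_LSE2 LDP of two 8-byte registers could be described as
     * (MO_128 | MO_ATOM_WITHIN16_PAIR): the whole 16-byte access is
     * single-copy atomic when it does not cross a 16-byte boundary,
     * and otherwise each 8-byte half behaves as MO_ATOM_WITHIN16.
     */
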
    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_UQ    = MO_64,
    MO_UO    = MO_128,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_SQ    = MO_SIGN | MO_64,
    MO_SO    = MO_SIGN | MO_128,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LEUQ  = MO_LE | MO_UQ,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LESQ  = MO_LE | MO_SQ,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BEUQ  = MO_BE | MO_UQ,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BESQ  = MO_BE | MO_SQ,

#ifdef COMPILING_PER_TARGET
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TEUQ  = MO_TE | MO_UQ,
    MO_TEUO  = MO_TE | MO_UO,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TESQ  = MO_TE | MO_SQ,
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;

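/*
 * Illustrative example, not a definition from this header: a typical
 * descriptor for a naturally aligned, sign-extending, target-endian
 * 2-byte load combines the fields above as (MO_TESW | MO_ALIGN),
 * i.e. MO_TE | MO_SIGN | MO_16 | MO_ALIGN.
 */
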
/* MemOp to size in bytes.  */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}

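/*
 * For example, memop_size(MO_LEUL) == 4 and memop_size(MO_BEUQ) == 8;
 * the alignment and atomicity fields do not affect the result, so
 * memop_size(MO_ALIGN_8 | MO_32) is also 4.
 */
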
/* Size in bytes to MemOp.  */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2 up to 8.  */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return (MemOp)ctz32(size);
}

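/*
 * For example, size_memop(1) == MO_8, size_memop(4) == MO_32 and
 * size_memop(8) == MO_64: counting the trailing zeros of a power of
 * two recovers the log2 encoding used by MO_SIZE.
 */
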
/**
 * memop_alignment_bits:
 * @memop: MemOp value
 *
 * Extract the alignment size from the memop.
 */
static inline unsigned memop_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required.  */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
    }
    return a;
}

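/*
 * The result is a log2 value: for example,
 * memop_alignment_bits(MO_64 | MO_ALIGN) == 3, an 8-byte natural
 * alignment, while memop_alignment_bits(MO_64 | MO_ALIGN_4) == 2,
 * since only 4-byte alignment was requested.
 */
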
/**
 * memop_atomicity_bits:
 * @memop: MemOp value
 *
 * Extract the atomicity size from the memop.
 */
static inline unsigned memop_atomicity_bits(MemOp memop)
{
    unsigned size = memop & MO_SIZE;

    switch (memop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
        size = MO_8;
        break;
    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        size = size ? size - 1 : 0;
        break;
    default:
        break;
    }
    return size;
}

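/*
 * The result is likewise a log2 value: for example,
 * memop_atomicity_bits(MO_128 | MO_ATOM_WITHIN16_PAIR) == 3, since a
 * paired 16-byte operation is only atomic in its 8-byte halves, while
 * memop_atomicity_bits(MO_64 | MO_ATOM_NONE) == 0, since nothing
 * beyond byte atomicity is required.
 */
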
#endif