# M-profile MVE instruction descriptions
#
# Copyright (c) 2021 Linaro, Ltd
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

#
# This file is processed by scripts/decodetree.py
#

%qd 22:1 13:3
%qm 5:1 1:3
%qn 7:1 17:3

# VQDMULL has size in bit 28: 0 for 16 bit, 1 for 32 bit
%size_28 28:1 !function=plus_1

# 2 operand fp insns have size in bit 20: 1 for 16 bit, 0 for 32 bit,
# like Neon FP insns.
%2op_fp_size 20:1 !function=neon_3same_fp_size
# VCADD is an exception, where bit 20 is 0 for 16 bit and 1 for 32 bit
%2op_fp_size_rev 20:1 !function=plus_1
# FP scalars have size in bit 28, 1 for 16 bit, 0 for 32 bit
%2op_fp_scalar_size 28:1 !function=neon_3same_fp_size

# 1imm format immediate
%imm_28_16_0 28:1 16:3 0:4

&vldr_vstr rn qd imm p a w size l u
&1op qd qm size
&2op qd qm qn size
&2scalar qd qn rm size
&1imm qd imm cmode op
&2shift qd qm shift size
&vidup qd rn size imm
&viwdup qd rn rm size imm
&vcmp qm qn size mask
&vcmp_scalar qn rm size mask
&shl_scalar qda rm size
&vmaxv qm rda size
&vabav qn qm rda size
&vldst_sg qd qm rn size msize os
&vldst_sg_imm qd qm a w imm
&vldst_il qd rn size pat w

# scatter-gather memory size is in bits 6:4
%sg_msize 6:1 4:1

@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@vldst_wn ... u:1 ... . . . . l:1 . rn:3 qd:3 . ... .. imm:7 &vldr_vstr

@vldst_sg .... .... .... rn:4 .... ... size:2 ... ... os:1 &vldst_sg \
          qd=%qd qm=%qm msize=%sg_msize

# Qm is in the fields usually labeled Qn
@vldst_sg_imm .... .... a:1 . w:1 . .... .... .... . imm:7 &vldst_sg_imm \
          qd=%qd qm=%qn

# Deinterleaving load/interleaving store
@vldst_il .... .... .. w:1 . rn:4 .... ... size:2 pat:2 ..... &vldst_il \
          qd=%qd

@1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm
@1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0
@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn
@2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0
@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
          size=%size_28
@1imm .... .... .... .... .... cmode:4 .. op:1 . .... &1imm qd=%qd imm=%imm_28_16_0

# The _rev suffix indicates that Vn and Vm are reversed. This is
# the case for shifts. In the Arm ARM these insns are documented
# with the Vm and Vn fields in their usual places, but in the
# assembly the operands are listed "backwards", ie in the order
# Qd, Qm, Qn where other insns use Qd, Qn, Qm. For QEMU we choose
# to consider Vm and Vn as being in different fields in the insn.
# This gives us consistency with A64 and Neon.
@2op_rev .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qn qn=%qm
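# e.g. VSHL is written "VSHL Qd, Qm, Qn": with this swap the trans
# functions still see the first-listed source operand in qn and the
# second in qm, just as for the non-reversed insns.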

@2scalar .... .... .. size:2 .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn
@2scalar_nosz .... .... .... .... .... .... .... rm:4 &2scalar qd=%qd qn=%qn

@2_shl_b .... .... .. 001 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
@2_shl_h .... .... .. 01 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
@2_shl_w .... .... .. 1 shift:5 .... .... .... .... &2shift qd=%qd qm=%qm size=2

@2_shll_b .... .... ... 01 shift:3 .... .... .... .... &2shift qd=%qd qm=%qm size=0
@2_shll_h .... .... ... 1 shift:4 .... .... .... .... &2shift qd=%qd qm=%qm size=1
# VSHLL encoding T2 where shift == esize
@2_shll_esize_b .... .... .... 00 .. .... .... .... .... &2shift \
                qd=%qd qm=%qm size=0 shift=8
@2_shll_esize_h .... .... .... 01 .. .... .... .... .... &2shift \
                qd=%qd qm=%qm size=1 shift=16

# Right shifts are encoded as N - shift, where N is the element size in bits.
%rshift_i5 16:5 !function=rsub_32
%rshift_i4 16:4 !function=rsub_16
%rshift_i3 16:3 !function=rsub_8

@2_shr_b .... .... .. 001 ... .... .... .... .... &2shift qd=%qd qm=%qm \
         size=0 shift=%rshift_i3
@2_shr_h .... .... .. 01 .... .... .... .... .... &2shift qd=%qd qm=%qm \
         size=1 shift=%rshift_i4
@2_shr_w .... .... .. 1 ..... .... .... .... .... &2shift qd=%qd qm=%qm \
         size=2 shift=%rshift_i5

@shl_scalar .... .... .... size:2 .. .... .... .... rm:4 &shl_scalar qda=%qd

# Vector comparison; 4-bit Qm but 3-bit Qn
%mask_22_13 22:1 13:3
@vcmp .... .... .. size:2 qn:3 . .... .... .... .... &vcmp qm=%qm mask=%mask_22_13
@vcmp_scalar .... .... .. size:2 qn:3 . .... .... .... rm:4 &vcmp_scalar \
             mask=%mask_22_13

@vcmp_fp .... .... .... qn:3 . .... .... .... .... &vcmp \
         qm=%qm size=%2op_fp_scalar_size mask=%mask_22_13

# Bit 28 is a 2op_fp_scalar_size bit, but we do not decode it in this
# format to avoid complicated overlapping-instruction-groups
@vcmp_fp_scalar .... .... .... qn:3 . .... .... .... rm:4 &vcmp_scalar \
                mask=%mask_22_13

@vmaxv .... .... .... size:2 .. rda:4 .... .... .... &vmaxv qm=%qm

@2op_fp .... .... .... .... .... .... .... .... &2op \
        qd=%qd qn=%qn qm=%qm size=%2op_fp_size

@2op_fp_size_rev .... .... .... .... .... .... .... .... &2op \
                 qd=%qd qn=%qn qm=%qm size=%2op_fp_size_rev

# 2-operand, but Qd and Qn share a field. Size is in bit 28, but we
# don't decode it in this format
@vmaxnma .... .... .... .... .... .... .... .... &2op \
         qd=%qd qn=%qd qm=%qm

# Here also we don't decode the bit 28 size in the format to avoid
# awkward nested overlap groups
@vmaxnmv .... .... .... .... rda:4 .... .... .... &vmaxv qm=%qm

@2op_fp_scalar .... .... .... .... .... .... .... rm:4 &2scalar \
               qd=%qd qn=%qn size=%2op_fp_scalar_size

# Vector loads and stores

# Widening loads and narrowing stores:
# for these P=0 W=0 is 'related encoding'; sz=11 is 'related encoding'
# This means we need to expand out to multiple patterns for P, W, SZ.
# For stores the U bit must be 0 but we catch that in the trans_ function.
# The naming scheme here is "VLDSTB_H == in-memory byte load/store to/from
# signed halfword element in register", etc.
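# For example "VLDRB.S16 Qd, [Rn]" (load bytes, sign-extending them to
# halfword elements) and its unsigned and store counterparts all match
# the VLDSTB_H patterns below.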
VLDSTB_H 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 01 ....... @vldst_wn \
         p=0 w=1 size=1
VLDSTB_H 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 01 ....... @vldst_wn \
         p=1 size=1
VLDSTB_W 111 . 110 0 a:1 0 1 . 0 ... ... 0 111 10 ....... @vldst_wn \
         p=0 w=1 size=2
VLDSTB_W 111 . 110 1 a:1 0 w:1 . 0 ... ... 0 111 10 ....... @vldst_wn \
         p=1 size=2
VLDSTH_W 111 . 110 0 a:1 0 1 . 1 ... ... 0 111 10 ....... @vldst_wn \
         p=0 w=1 size=2
VLDSTH_W 111 . 110 1 a:1 0 w:1 . 1 ... ... 0 111 10 ....... @vldst_wn \
         p=1 size=2

# Non-widening loads/stores (P=0 W=0 is 'related encoding')
VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111100 ....... @vldr_vstr \
          size=0 p=0 w=1
VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111101 ....... @vldr_vstr \
          size=1 p=0 w=1
VLDR_VSTR 1110110 0 a:1 . 1 . .... ... 111110 ....... @vldr_vstr \
          size=2 p=0 w=1
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111100 ....... @vldr_vstr \
          size=0 p=1
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111101 ....... @vldr_vstr \
          size=1 p=1
VLDR_VSTR 1110110 1 a:1 . w:1 . .... ... 111110 ....... @vldr_vstr \
          size=2 p=1

# gather loads/scatter stores
VLDR_S_sg 111 0 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg
VLDR_U_sg 111 1 1100 1 . 01 .... ... 0 111 . .... .... @vldst_sg
VSTR_sg 111 0 1100 1 . 00 .... ... 0 111 . .... .... @vldst_sg

VLDRW_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1110 .... .... @vldst_sg_imm
VLDRD_sg_imm 111 1 1101 ... 1 ... 0 ... 1 1111 .... .... @vldst_sg_imm
VSTRW_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1110 .... .... @vldst_sg_imm
VSTRD_sg_imm 111 1 1101 ... 0 ... 0 ... 1 1111 .... .... @vldst_sg_imm

# deinterleaving loads/interleaving stores
VLD2 1111 1100 1 .. 1 .... ... 1 111 .. .. 00000 @vldst_il
VLD4 1111 1100 1 .. 1 .... ... 1 111 .. .. 00001 @vldst_il
VST2 1111 1100 1 .. 0 .... ... 1 111 .. .. 00000 @vldst_il
VST4 1111 1100 1 .. 0 .... ... 1 111 .. .. 00001 @vldst_il

# Moves between 2 32-bit vector lanes and 2 general purpose registers
VMOV_to_2gp 1110 1100 0 . 00 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd
VMOV_from_2gp 1110 1100 0 . 01 rt2:4 ... 0 1111 000 idx:1 rt:4 qd=%qd

# Vector 2-op
VAND 1110 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
VBIC 1110 1111 0 . 01 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz
VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz

VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op
VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op

# The VSHLL T2 encoding is not a @2op pattern, but is here because it
# overlaps what would be size=0b11 VMULH/VRMULH
{
  VMAXNMA 111 0 1110 0 . 11 1111 ... 0 1110 1 0 . 0 ... 1 @vmaxnma size=2

  VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
  VSHLL_BS 111 0 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h

  VQMOVUNB 111 0 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
  VQMOVN_BS 111 0 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op

  VMAXA 111 0 1110 0 . 11 .. 11 ... 0 1110 1 0 . 0 ... 1 @1op

  VMULH_S 111 0 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
}

{
  VMAXNMA 111 1 1110 0 . 11 1111 ... 0 1110 1 0 . 0 ... 1 @vmaxnma size=1

  VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_b
  VSHLL_BU 111 1 1110 0 . 11 .. 01 ... 0 1110 0 0 . 0 ... 1 @2_shll_esize_h

  VMOVNB 111 1 1110 0 . 11 .. 01 ... 0 1110 1 0 . 0 ... 1 @1op
  VQMOVN_BU 111 1 1110 0 . 11 .. 11 ... 0 1110 0 0 . 0 ... 1 @1op

  VMULH_U 111 1 1110 0 . .. ...1 ... 0 1110 . 0 . 0 ... 1 @2op
}
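# In these "{ }" overlap groups decodetree tries the patterns in order,
# so the specific size=0b11 encodings (VMAXNMA, VSHLL, VQMOVN, VMAXA,
# etc) take precedence over the VMULH/VRMULH patterns that would
# otherwise also match.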
{
  VMINNMA 111 0 1110 0 . 11 1111 ... 1 1110 1 0 . 0 ... 1 @vmaxnma size=2
  VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
  VSHLL_TS 111 0 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h

  VQMOVUNT 111 0 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
  VQMOVN_TS 111 0 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op

  VMINA 111 0 1110 0 . 11 .. 11 ... 1 1110 1 0 . 0 ... 1 @1op

  VRMULH_S 111 0 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
}

{
  VMINNMA 111 1 1110 0 . 11 1111 ... 1 1110 1 0 . 0 ... 1 @vmaxnma size=1
  VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_b
  VSHLL_TU 111 1 1110 0 . 11 .. 01 ... 1 1110 0 0 . 0 ... 1 @2_shll_esize_h

  VMOVNT 111 1 1110 0 . 11 .. 01 ... 1 1110 1 0 . 0 ... 1 @1op
  VQMOVN_TU 111 1 1110 0 . 11 .. 11 ... 1 1110 0 0 . 0 ... 1 @1op

  VRMULH_U 111 1 1110 0 . .. ...1 ... 1 1110 . 0 . 0 ... 1 @2op
}

VMAX_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
VMAX_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 0 ... 0 @2op
VMIN_S 111 0 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op
VMIN_U 111 1 1111 0 . .. ... 0 ... 0 0110 . 1 . 1 ... 0 @2op

VABD_S 111 0 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op
VABD_U 111 1 1111 0 . .. ... 0 ... 0 0111 . 1 . 0 ... 0 @2op

VHADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
VHADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 0 ... 0 @2op
VHSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op
VHSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 0 ... 0 @2op

{
  VMULLP_B 111 . 1110 0 . 11 ... 1 ... 0 1110 . 0 . 0 ... 0 @2op_sz28
  VMULL_BS 111 0 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
  VMULL_BU 111 1 1110 0 . .. ... 1 ... 0 1110 . 0 . 0 ... 0 @2op
}
{
  VMULLP_T 111 . 1110 0 . 11 ... 1 ... 1 1110 . 0 . 0 ... 0 @2op_sz28
  VMULL_TS 111 0 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
  VMULL_TU 111 1 1110 0 . .. ... 1 ... 1 1110 . 0 . 0 ... 0 @2op
}

VQDMULH 1110 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op
VQRDMULH 1111 1111 0 . .. ... 0 ... 0 1011 . 1 . 0 ... 0 @2op

VQADD_S 111 0 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
VQADD_U 111 1 1111 0 . .. ... 0 ... 0 0000 . 1 . 1 ... 0 @2op
VQSUB_S 111 0 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op
VQSUB_U 111 1 1111 0 . .. ... 0 ... 0 0010 . 1 . 1 ... 0 @2op

VSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev
VSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 0 ... 0 @2op_rev

VRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev
VRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 0 ... 0 @2op_rev

VQSHL_S 111 0 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
VQSHL_U 111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev

VQRSHL_S 111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
VQRSHL_U 111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev

{
  VCMUL0 111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 0 @2op_sz28
  VQDMLADH 1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
  VQDMLSDH 1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
}

{
  VCMUL180 111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 0 @2op_sz28
  VQDMLADHX 111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
  VQDMLSDHX 111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
}
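# As with VCMUL0/VCMUL180 above, VCMUL90/VCMUL270 below occupy what
# would be the size=0b11 encodings of VQRDMLADH/VQRDMLSDH, taking
# their own size from bit 28 via %size_28.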
{
  VCMUL90 111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 1 @2op_sz28
  VQRDMLADH 111 0 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
  VQRDMLSDH 111 1 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
}

{
  VCMUL270 111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 1 @2op_sz28
  VQRDMLADHX 111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
  VQRDMLSDHX 111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
}

VQDMULLB 111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28

VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op

{
  VADC 1110 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
  VADCI 1110 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
  VHCADD90 1110 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
  VHCADD270 1110 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
}

{
  VSBC 1111 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 0 @2op_nosz
  VSBCI 1111 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 0 @2op_nosz
  VCADD90 1111 1110 0 . .. ... 0 ... 0 1111 . 0 . 0 ... 0 @2op
  VCADD270 1111 1110 0 . .. ... 0 ... 1 1111 . 0 . 0 ... 0 @2op
}

# Vector miscellaneous

VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
VCLZ 1111 1111 1 . 11 .. 00 ... 0 0100 11 . 0 ... 0 @1op

VREV16 1111 1111 1 . 11 .. 00 ... 0 0001 01 . 0 ... 0 @1op
VREV32 1111 1111 1 . 11 .. 00 ... 0 0000 11 . 0 ... 0 @1op
VREV64 1111 1111 1 . 11 .. 00 ... 0 0000 01 . 0 ... 0 @1op

VMVN 1111 1111 1 . 11 00 00 ... 0 0101 11 . 0 ... 0 @1op_nosz

VABS 1111 1111 1 . 11 .. 01 ... 0 0011 01 . 0 ... 0 @1op
VABS_fp 1111 1111 1 . 11 .. 01 ... 0 0111 01 . 0 ... 0 @1op
VNEG 1111 1111 1 . 11 .. 01 ... 0 0011 11 . 0 ... 0 @1op
VNEG_fp 1111 1111 1 . 11 .. 01 ... 0 0111 11 . 0 ... 0 @1op

VQABS 1111 1111 1 . 11 .. 00 ... 0 0111 01 . 0 ... 0 @1op
VQNEG 1111 1111 1 . 11 .. 00 ... 0 0111 11 . 0 ... 0 @1op

&vdup qd rt size
# Qd is in the fields usually named Qn
@vdup .... .... . . .. ... . rt:4 .... . . . . .... qd=%qn &vdup

# B and E bits encode size, which we decode here to the usual size values
VDUP 1110 1110 1 1 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=0
VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 1 1 0000 @vdup size=1
VDUP 1110 1110 1 0 10 ... 0 .... 1011 . 0 0 1 0000 @vdup size=2

# Incrementing and decrementing dup

# VIDUP, VDDUP format immediate: 1 << (immh:imml)
%imm_vidup 7:1 0:1 !function=vidup_imm

# VIDUP, VDDUP registers: Rm bits [3:1] from insn, bit 0 is 1;
# Rn bits [3:1] from insn, bit 0 is 0
%vidup_rm 1:3 !function=times_2_plus_1
%vidup_rn 17:3 !function=times_2

@vidup .... .... . . size:2 .... .... .... .... .... \
       qd=%qd imm=%imm_vidup rn=%vidup_rn &vidup
@viwdup .... .... . . size:2 .... .... .... .... .... \
        qd=%qd imm=%imm_vidup rm=%vidup_rm rn=%vidup_rn &viwdup
{
  VIDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 111 . @vidup
  VIWDUP 1110 1110 0 . .. ... 1 ... 0 1111 . 110 ... . @viwdup
}
{
  VCMPGT_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 0110 .... @vcmp_fp_scalar size=2
  VCMPLE_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 1110 .... @vcmp_fp_scalar size=2
  VDDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 111 . @vidup
  VDWDUP 1110 1110 0 . .. ... 1 ... 1 1111 . 110 ... . @viwdup
}
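# Worked example for the fields above: an immh:imml of 0b10 gives a
# VIDUP/VDDUP step of 1 << 2 == 4, and %vidup_rm/%vidup_rn make Rm an
# odd-numbered and Rn an even-numbered register (an Rm field of 0b011
# selects R7).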
# multiply-add long dual accumulate
# rdahi: bits [3:1] from insn, bit 0 is 1
# rdalo: bits [3:1] from insn, bit 0 is 0
%rdahi 20:3 !function=times_2_plus_1
%rdalo 13:3 !function=times_2
# size bit is 0 for 16 bit, 1 for 32 bit
%size_16 16:1 !function=plus_1

&vmlaldav rdahi rdalo size qn qm x a
&vmladav rda size qn qm x a

@vmlaldav .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
          qn=%qn rdahi=%rdahi rdalo=%rdalo size=%size_16 &vmlaldav
@vmlaldav_nosz .... .... . ... ... . ... x:1 .... .. a:1 . qm:3 . \
               qn=%qn rdahi=%rdahi rdalo=%rdalo size=0 &vmlaldav
@vmladav .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
         qn=%qn rda=%rdalo size=%size_16 &vmladav
@vmladav_nosz .... .... .... ... . ... x:1 .... . . a:1 . qm:3 . \
              qn=%qn rda=%rdalo size=0 &vmladav

{
  VMLADAV_S 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
  VMLALDAV_S 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
}
{
  VMLADAV_U 1111 1110 1111 ... . ... . 1110 . 0 . 0 ... 0 @vmladav
  VMLALDAV_U 1111 1110 1 ... ... . ... . 1110 . 0 . 0 ... 0 @vmlaldav
}

{
  VMLSDAV 1110 1110 1111 ... . ... . 1110 . 0 . 0 ... 1 @vmladav
  VMLSLDAV 1110 1110 1 ... ... . ... . 1110 . 0 . 0 ... 1 @vmlaldav
}

{
  VMLSDAV 1111 1110 1111 ... 0 ... . 1110 . 0 . 0 ... 1 @vmladav_nosz
  VRMLSLDAVH 1111 1110 1 ... ... 0 ... . 1110 . 0 . 0 ... 1 @vmlaldav_nosz
}

VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz
VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 1 @vmladav_nosz

{
  [
    VMAXNMAV 1110 1110 1110 11 00 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=2
    VMINNMAV 1110 1110 1110 11 00 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=2
    VMAXNMV 1110 1110 1110 11 10 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=2
    VMINNMV 1110 1110 1110 11 10 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=2
  ]
  [
    VMAXV_S 1110 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
    VMINV_S 1110 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
    VMAXAV 1110 1110 1110 .. 00 .... 1111 0 0 . 0 ... 0 @vmaxv
    VMINAV 1110 1110 1110 .. 00 .... 1111 1 0 . 0 ... 0 @vmaxv
  ]
  VMLADAV_S 1110 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
  VRMLALDAVH_S 1110 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
}

{
  [
    VMAXNMAV 1111 1110 1110 11 00 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=1
    VMINNMAV 1111 1110 1110 11 00 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=1
    VMAXNMV 1111 1110 1110 11 10 .... 1111 0 0 . 0 ... 0 @vmaxnmv size=1
    VMINNMV 1111 1110 1110 11 10 .... 1111 1 0 . 0 ... 0 @vmaxnmv size=1
  ]
  [
    VMAXV_U 1111 1110 1110 .. 10 .... 1111 0 0 . 0 ... 0 @vmaxv
    VMINV_U 1111 1110 1110 .. 10 .... 1111 1 0 . 0 ... 0 @vmaxv
  ]
  VMLADAV_U 1111 1110 1111 ... 0 ... . 1111 . 0 . 0 ... 0 @vmladav_nosz
  VRMLALDAVH_U 1111 1110 1 ... ... 0 ... . 1111 . 0 . 0 ... 0 @vmlaldav_nosz
}

# Scalar operations
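# In several of the overlap groups below, the fp scalar ops and the fp
# scalar-form comparisons occupy what would be the size=0b11 encodings
# of the integer scalar ops, so those fp patterns are listed first.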
{
  VCMPEQ_fp_scalar 1110 1110 0 . 11 ... 1 ... 0 1111 0100 .... @vcmp_fp_scalar size=2
  VCMPNE_fp_scalar 1110 1110 0 . 11 ... 1 ... 0 1111 1100 .... @vcmp_fp_scalar size=2
  VADD_scalar 1110 1110 0 . .. ... 1 ... 0 1111 . 100 .... @2scalar
}

{
  VCMPLT_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 1100 .... @vcmp_fp_scalar size=2
  VCMPGE_fp_scalar 1110 1110 0 . 11 ... 1 ... 1 1111 0100 .... @vcmp_fp_scalar size=2
  VSUB_scalar 1110 1110 0 . .. ... 1 ... 1 1111 . 100 .... @2scalar
}

{
  VSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar
  VRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar
  VQSHL_S_scalar 1110 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar
  VQRSHL_S_scalar 1110 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar
  VMUL_scalar 1110 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
}

{
  VSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 0110 .... @shl_scalar
  VRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 0110 .... @shl_scalar
  VQSHL_U_scalar 1111 1110 0 . 11 .. 01 ... 1 1110 1110 .... @shl_scalar
  VQRSHL_U_scalar 1111 1110 0 . 11 .. 11 ... 1 1110 1110 .... @shl_scalar
  VBRSR 1111 1110 0 . .. ... 1 ... 1 1110 . 110 .... @2scalar
}

{
  VADD_fp_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 100 .... @2op_fp_scalar
  VHADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
  VHADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 100 .... @2scalar
}

{
  VSUB_fp_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 100 .... @2op_fp_scalar
  VHSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
  VHSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 100 .... @2scalar
}

{
  VQADD_S_scalar 1110 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
  VQADD_U_scalar 1111 1110 0 . .. ... 0 ... 0 1111 . 110 .... @2scalar
  VQDMULLB_scalar 111 . 1110 0 . 11 ... 0 ... 0 1111 . 110 .... @2scalar_nosz \
                  size=%size_28
}

{
  VQSUB_S_scalar 1110 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
  VQSUB_U_scalar 1111 1110 0 . .. ... 0 ... 1 1111 . 110 .... @2scalar
  VQDMULLT_scalar 111 . 1110 0 . 11 ... 0 ... 1 1111 . 110 .... @2scalar_nosz \
                  size=%size_28
}

{
  VMUL_fp_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 110 .... @2op_fp_scalar
  VQDMULH_scalar 1110 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
  VQRDMULH_scalar 1111 1110 0 . .. ... 1 ... 0 1110 . 110 .... @2scalar
}

{
  VFMA_scalar 111 . 1110 0 . 11 ... 1 ... 0 1110 . 100 .... @2op_fp_scalar
  # The U bit (28) is don't-care because it does not affect the result
  VMLA 111 - 1110 0 . .. ... 1 ... 0 1110 . 100 .... @2scalar
}

{
  VFMAS_scalar 111 . 1110 0 . 11 ... 1 ... 1 1110 . 100 .... @2op_fp_scalar
  # The U bit (28) is don't-care because it does not affect the result
  VMLAS 111 - 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
}

VQRDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 100 .... @2scalar
VQRDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 100 .... @2scalar
VQDMLAH 1110 1110 0 . .. ... 0 ... 0 1110 . 110 .... @2scalar
VQDMLASH 1110 1110 0 . .. ... 0 ... 1 1110 . 110 .... @2scalar

# Vector add across vector
{
  VADDV 111 u:1 1110 1111 size:2 01 ... 0 1111 0 0 a:1 0 qm:3 0 rda=%rdalo
  VADDLV 111 u:1 1110 1 ... 1001 ... 0 1111 00 a:1 0 qm:3 0 \
         rdahi=%rdahi rdalo=%rdalo
}

@vabav .... .... .. size:2 .... rda:4 .... .... .... &vabav qn=%qn qm=%qm

VABAV_S 111 0 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
VABAV_U 111 1 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav

# Logical immediate operations (1 reg and modified-immediate)

# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
# not in a way we can conveniently represent in decodetree without
# a lot of repetition:
# VORR: op=0, (cmode & 1) && cmode < 12
# VBIC: op=1, (cmode & 1) && cmode < 12
# VMOV: everything else
# So we have a single decode line and check the cmode/op in the
# trans function.
Vimm_1r 111 . 1111 1 . 00 0 ... ... 0 .... 0 1 . 1 .... @1imm
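# Applying those rules: op=0 cmode=0b0001 is VORR and op=1 cmode=0b0001
# is VBIC, while op=0 cmode=0b0000 falls into the "everything else"
# VMOV case.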
# Shifts by immediate

VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
VSHLI 111 0 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w

VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
VQSHLI_S 111 0 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w

VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_b
VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_h
VQSHLI_U 111 1 1111 1 . ... ... ... 0 0111 0 1 . 1 ... 0 @2_shl_w

VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_b
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_h
VQSHLUI 111 1 1111 1 . ... ... ... 0 0110 0 1 . 1 ... 0 @2_shl_w

VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
VSHRI_S 111 0 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w

VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_b
VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_h
VSHRI_U 111 1 1111 1 . ... ... ... 0 0000 0 1 . 1 ... 0 @2_shr_w

VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
VRSHRI_S 111 0 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w

VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_b
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_h
VRSHRI_U 111 1 1111 1 . ... ... ... 0 0010 0 1 . 1 ... 0 @2_shr_w

# VSHLL T1 encoding; the T2 VSHLL encoding is elsewhere in this file
# Note that VMOVL is encoded as "VSHLL with a zero shift count"; we
# implement it that way rather than special-casing it in the decode.
VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
VSHLL_BS 111 0 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h

VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_b
VSHLL_BU 111 1 1110 1 . 1 .. ... ... 0 1111 0 1 . 0 ... 0 @2_shll_h

VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h

VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
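# e.g. VMOVLB.S8 is simply VSHLL_BS with a shift field of 0, so it
# needs no pattern of its own here.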
# Shift-and-insert
VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w

VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w

# Narrowing shifts (which only support b and h sizes)
VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
VSHRNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
VSHRNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h

VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_b
VRSHRNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 1 @2_shr_h
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_b
VRSHRNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 1 @2_shr_h

VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
VQSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
VQSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h
VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_b
VQSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 0 @2_shr_h
VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_b
VQSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 0 @2_shr_h

VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
VQSHRUNB 111 0 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
VQSHRUNT 111 0 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h

VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
VQRSHRNB_S 111 0 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
VQRSHRNT_S 111 0 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h
VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_b
VQRSHRNB_U 111 1 1110 1 . ... ... ... 0 1111 0 1 . 0 ... 1 @2_shr_h
VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_b
VQRSHRNT_U 111 1 1110 1 . ... ... ... 1 1111 0 1 . 0 ... 1 @2_shr_h

VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_b
VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h

VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd

# Comparisons. We expand out the conditions which are split across
# encodings T1, T2, T3 and the fc bits. These include VPT, which is
# effectively "VCMP then VPST". A plain "VCMP" has a mask field of zero.
{
  VCMPEQ_fp 111 . 1110 0 . 11 ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp_fp
  VCMPEQ 111 1 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 0 @vcmp
}

{
  VCMPNE_fp 111 . 1110 0 . 11 ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp_fp
  VCMPNE 111 1 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 0 @vcmp
}

{
  VCMPGE_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp_fp
  VCMPGE 111 1 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 0 @vcmp
}

{
  VCMPLT_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp_fp
  VCMPLT 111 1 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 0 @vcmp
}

{
  VCMPGT_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp_fp
  VCMPGT 111 1 1110 0 . .. ... 1 ... 1 1111 0 0 . 0 ... 1 @vcmp
}

{
  VCMPLE_fp 111 . 1110 0 . 11 ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp_fp
  VCMPLE 1111 1110 0 . .. ... 1 ... 1 1111 1 0 . 0 ... 1 @vcmp
}

{
  VPSEL 1111 1110 0 . 11 ... 1 ... 0 1111 . 0 . 0 ... 1 @2op_nosz
  VCMPCS 1111 1110 0 . .. ... 1 ... 0 1111 0 0 . 0 ... 1 @vcmp
  VCMPHI 1111 1110 0 . .. ... 1 ... 0 1111 1 0 . 0 ... 1 @vcmp
}
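# VPNOT and VPST use what would be the VCMPEQ_scalar encoding with the
# Rm field fixed to 0b1101 (r13, which the scalar compares don't
# accept), so they must be listed before it in the group below.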
{
  VPNOT 1111 1110 0 0 11 000 1 000 0 1111 0100 1101
  VPST 1111 1110 0 . 11 000 1 ... 0 1111 0100 1101 mask=%mask_22_13
  VCMPEQ_fp_scalar 1111 1110 0 . 11 ... 1 ... 0 1111 0100 .... @vcmp_fp_scalar size=1
  VCMPEQ_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0100 .... @vcmp_scalar
}

{
  VCMPNE_fp_scalar 1111 1110 0 . 11 ... 1 ... 0 1111 1100 .... @vcmp_fp_scalar size=1
  VCMPNE_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1100 .... @vcmp_scalar
}

{
  VCMPGT_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 0110 .... @vcmp_fp_scalar size=1
  VCMPGT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0110 .... @vcmp_scalar
}

{
  VCMPLE_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 1110 .... @vcmp_fp_scalar size=1
  VCMPLE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1110 .... @vcmp_scalar
}

{
  VCMPGE_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 0100 .... @vcmp_fp_scalar size=1
  VCMPGE_scalar 1111 1110 0 . .. ... 1 ... 1 1111 0100 .... @vcmp_scalar
}
{
  VCMPLT_fp_scalar 1111 1110 0 . 11 ... 1 ... 1 1111 1100 .... @vcmp_fp_scalar size=1
  VCMPLT_scalar 1111 1110 0 . .. ... 1 ... 1 1111 1100 .... @vcmp_scalar
}

VCMPCS_scalar 1111 1110 0 . .. ... 1 ... 0 1111 0 1 1 0 .... @vcmp_scalar
VCMPHI_scalar 1111 1110 0 . .. ... 1 ... 0 1111 1 1 1 0 .... @vcmp_scalar

# 2-operand FP
VADD_fp 1110 1111 0 . 0 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp
VSUB_fp 1110 1111 0 . 1 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp
VMUL_fp 1111 1111 0 . 0 . ... 0 ... 0 1101 . 1 . 1 ... 0 @2op_fp
VABD_fp 1111 1111 0 . 1 . ... 0 ... 0 1101 . 1 . 0 ... 0 @2op_fp

VMAXNM 1111 1111 0 . 0 . ... 0 ... 0 1111 . 1 . 1 ... 0 @2op_fp
VMINNM 1111 1111 0 . 1 . ... 0 ... 0 1111 . 1 . 1 ... 0 @2op_fp

VCADD90_fp 1111 1100 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
VCADD270_fp 1111 1101 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev

VFMA 1110 1111 0 . 0 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp
VFMS 1110 1111 0 . 1 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp

VCMLA0 1111 110 00 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
VCMLA90 1111 110 01 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
VCMLA180 1111 110 10 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
VCMLA270 1111 110 11 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev