Lines Matching +full:reg +full:- +full:shift in arch/arm64 insn.c (AArch64 instruction encode/decode helpers; the leading numbers are line numbers in that file)
1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
15 #include <asm/debug-monitors.h>
28 int shift; in aarch64_get_imm_shift_mask() local
32 mask = BIT(26) - 1; in aarch64_get_imm_shift_mask()
33 shift = 0; in aarch64_get_imm_shift_mask()
36 mask = BIT(19) - 1; in aarch64_get_imm_shift_mask()
37 shift = 5; in aarch64_get_imm_shift_mask()
40 mask = BIT(16) - 1; in aarch64_get_imm_shift_mask()
41 shift = 5; in aarch64_get_imm_shift_mask()
44 mask = BIT(14) - 1; in aarch64_get_imm_shift_mask()
45 shift = 5; in aarch64_get_imm_shift_mask()
48 mask = BIT(12) - 1; in aarch64_get_imm_shift_mask()
49 shift = 10; in aarch64_get_imm_shift_mask()
52 mask = BIT(9) - 1; in aarch64_get_imm_shift_mask()
53 shift = 12; in aarch64_get_imm_shift_mask()
56 mask = BIT(7) - 1; in aarch64_get_imm_shift_mask()
57 shift = 15; in aarch64_get_imm_shift_mask()
61 mask = BIT(6) - 1; in aarch64_get_imm_shift_mask()
62 shift = 10; in aarch64_get_imm_shift_mask()
65 mask = BIT(6) - 1; in aarch64_get_imm_shift_mask()
66 shift = 16; in aarch64_get_imm_shift_mask()
70 shift = 22; in aarch64_get_imm_shift_mask()
73 return -EINVAL; in aarch64_get_imm_shift_mask()
77 *shiftp = shift; in aarch64_get_imm_shift_mask()
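Read together, these (mask, shift) pairs pin down where each immediate class lives in the 32-bit instruction word: the 26-bit field at bits [25:0] is the B/BL offset, the 19-bit field at [23:5] serves conditional branches, CBZ/CBNZ and literal loads, the 16-bit field at [20:5] is the move-wide immediate, and the 12-bit field at [21:10] belongs to ADD/SUB (immediate). A minimal userspace sketch of the same table-driven extraction; the struct and the imm26/imm19/imm12 names are illustrative, not kernel API:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* One (mask, shift) pair per immediate class, mirroring the table above. */
struct imm_field { uint32_t mask; int shift; };

static const struct imm_field imm26 = { BIT(26) - 1, 0 };  /* B/BL offset       */
static const struct imm_field imm19 = { BIT(19) - 1, 5 };  /* B.cond, CBZ/CBNZ  */
static const struct imm_field imm12 = { BIT(12) - 1, 10 }; /* ADD/SUB immediate */

int main(void)
{
        uint32_t insn = 0x14000004;     /* "b ." + 16 bytes: imm26 holds 4 words */

        printf("imm26 = %u\n", (insn >> imm26.shift) & imm26.mask);
        return 0;
}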
84 #define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
85 #define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
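ADR/ADRP are the odd case these macros handle: their 21-bit immediate is split into a 2-bit immlo field at bits [30:29] and a 19-bit immhi field at bits [23:5]. The same file defines ADR_IMM_LOSHIFT as 29 and ADR_IMM_HISHIFT as 5. A sketch of reassembling the split field; adr_imm_decode() is a hypothetical name:

#include <stdint.h>

#define ADR_IMM_HILOSPLIT 2
#define ADR_IMM_SIZE      (1UL << 21)   /* the immediate spans 21 bits */
#define ADR_IMM_LOMASK    ((1UL << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK    ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT   29
#define ADR_IMM_HISHIFT   5

/* Glue immhi:immlo back together, as the ADR case of the decoder does. */
static uint32_t adr_imm_decode(uint32_t insn)
{
        uint32_t lo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
        uint32_t hi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;

        return (hi << ADR_IMM_HILOSPLIT) | lo;
}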
92 int shift; in aarch64_insn_decode_immediate() local
96 shift = 0; in aarch64_insn_decode_immediate()
100 mask = ADR_IMM_SIZE - 1; in aarch64_insn_decode_immediate()
103 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { in aarch64_insn_decode_immediate()
110 return (insn >> shift) & mask; in aarch64_insn_decode_immediate()
117 int shift; in aarch64_insn_encode_immediate() local
124 shift = 0; in aarch64_insn_encode_immediate()
133 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { in aarch64_insn_encode_immediate()
141 insn &= ~(mask << shift); in aarch64_insn_encode_immediate()
142 insn |= (imm & mask) << shift; in aarch64_insn_encode_immediate()
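The encode side is the mirror image of decoding: clear the field with the inverted mask, then OR in the new value, masking the caller's immediate down to field size so a sign-extended offset cannot smear into neighbouring bits. A self-contained sketch of that read-modify-write (patch_field() is a hypothetical helper, not kernel API):

#include <stdint.h>

/* Replace one bitfield of an instruction word; mirrors
 * "insn &= ~(mask << shift); insn |= (imm & mask) << shift;" above. */
static uint32_t patch_field(uint32_t insn, uint32_t mask, int shift,
                            uint32_t imm)
{
        insn &= ~(mask << shift);
        insn |= (imm & mask) << shift;

        return insn;
}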
150 int shift; in aarch64_insn_decode_register() local
155 shift = 0; in aarch64_insn_decode_register()
158 shift = 5; in aarch64_insn_decode_register()
162 shift = 10; in aarch64_insn_decode_register()
165 shift = 16; in aarch64_insn_decode_register()
173 return (insn >> shift) & GENMASK(4, 0); in aarch64_insn_decode_register()
178 enum aarch64_insn_register reg) in aarch64_insn_encode_register() argument
180 int shift; in aarch64_insn_encode_register() local
185 if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { in aarch64_insn_encode_register()
186 pr_err("%s: unknown register encoding %d\n", __func__, reg); in aarch64_insn_encode_register()
193 shift = 0; in aarch64_insn_encode_register()
196 shift = 5; in aarch64_insn_encode_register()
200 shift = 10; in aarch64_insn_encode_register()
204 shift = 16; in aarch64_insn_encode_register()
212 insn &= ~(GENMASK(4, 0) << shift); in aarch64_insn_encode_register()
213 insn |= reg << shift; in aarch64_insn_encode_register()
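Register operands are the simple case: every register field is 5 bits wide (GENMASK(4, 0)) and only its position changes, per the shift values above: Rd/Rt at bit 0, Rn at 5, Rt2/Ra at 10, Rm/Rs at 16. A sketch with hypothetical get_reg()/set_reg() helpers:

#include <stdint.h>

#define REG_MASK 0x1fU  /* GENMASK(4, 0): all register fields are 5 bits */

static uint32_t get_reg(uint32_t insn, int lsb)
{
        return (insn >> lsb) & REG_MASK;
}

static uint32_t set_reg(uint32_t insn, int lsb, uint32_t reg)
{
        insn &= ~(REG_MASK << lsb);

        return insn | ((reg & REG_MASK) << lsb);
}

Because these slots are fixed across most of the A64 encoding, get_reg(insn, 5) pulls the base/source register Rn out of loads, stores and ALU ops alike.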
252 offset = ((long)addr - (long)pc); in label_imm_common()
254 if (offset < -range || offset >= range) { in label_imm_common()
269 * B/BL support [-128M, 128M) offset in aarch64_insn_gen_branch_imm()
271 * texts are within +/-128M. in aarch64_insn_gen_branch_imm()
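Those two comment lines carry the key constraint: B/BL encode a signed 26-bit word offset, so the reachable range is [-128M, 128M), and the kernel's virtual layout keeps all kernel and module text inside it. A hedged sketch of the arithmetic for a plain B; gen_b() is an illustrative name, though 0x14000000 is the architectural B opcode:

#include <stdint.h>
#include <stdio.h>

/* B <target>: imm26 = (target - pc) / 4, range [-128M, 128M). */
static uint32_t gen_b(uint64_t pc, uint64_t target)
{
        int64_t off = (int64_t)(target - pc);

        if (off < -(1LL << 27) || off >= (1LL << 27) || (off & 3))
                return 0;       /* out of range or not word aligned */

        return 0x14000000u | ((uint32_t)(off >> 2) & ((1u << 26) - 1));
}

int main(void)
{
        printf("0x%08x\n", gen_b(0x1000, 0x1010));      /* prints 0x14000004 */
        return 0;
}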
294 enum aarch64_insn_register reg, in aarch64_insn_gen_comp_branch_imm() argument
328 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_comp_branch_imm()
354 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, in aarch64_insn_gen_branch_reg() argument
374 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); in aarch64_insn_gen_branch_reg()
377 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, in aarch64_insn_gen_load_store_reg() argument
402 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_load_store_reg()
411 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg, in aarch64_insn_gen_load_store_imm() argument
418 u32 shift; in aarch64_insn_gen_load_store_imm() local
425 shift = aarch64_insn_ldst_size[size]; in aarch64_insn_gen_load_store_imm()
426 if (imm & ~(BIT(12 + shift) - BIT(shift))) { in aarch64_insn_gen_load_store_imm()
431 imm >>= shift; in aarch64_insn_gen_load_store_imm()
450 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_load_store_imm()
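Here the unsigned 12-bit offset is scaled by the access size: shift is log2 of the transfer width, and "imm & ~(BIT(12 + shift) - BIT(shift))" rejects an offset that is either misaligned for that width or too large once divided by it. A sketch of the same check; scale_ldst_imm() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

/* Scale an LDR/STR (unsigned immediate) offset: only bits [shift, 12+shift)
 * may be set, and the field stores offset >> shift. */
static int scale_ldst_imm(uint64_t imm, unsigned int shift, uint32_t *field)
{
        if (imm & ~(BIT(12 + shift) - BIT(shift)))
                return -1;      /* misaligned or too large for imm12 */

        *field = imm >> shift;
        return 0;
}

int main(void)
{
        uint32_t field;

        /* 8-byte access (shift = 3): 32760 is the largest legal offset */
        if (!scale_ldst_imm(32760, 3, &field))
                printf("imm12 = %u\n", field);  /* prints 4095 */
        return 0;
}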
459 enum aarch64_insn_register reg, in aarch64_insn_gen_load_literal() argument
474 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_load_literal()
488 int shift; in aarch64_insn_gen_load_store_pair() local
510 if ((offset & 0x3) || (offset < -256) || (offset > 252)) { in aarch64_insn_gen_load_store_pair()
511 pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n", in aarch64_insn_gen_load_store_pair()
515 shift = 2; in aarch64_insn_gen_load_store_pair()
518 if ((offset & 0x7) || (offset < -512) || (offset > 504)) { in aarch64_insn_gen_load_store_pair()
519 pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n", in aarch64_insn_gen_load_store_pair()
523 shift = 3; in aarch64_insn_gen_load_store_pair()
541 offset >> shift); in aarch64_insn_gen_load_store_pair()
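Pair accesses store a signed 7-bit immediate scaled by the register size, which is where the two ranges in the error messages come from: offset/4 for 32-bit pairs ([-256, 252]) and offset/8 for 64-bit pairs ([-512, 504]). A sketch of the scaling; pair_imm7() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>

/* LDP/STP imm7: offset >> shift, two's complement in 7 bits. */
static int pair_imm7(int offset, int shift, uint32_t *field)
{
        int min = -(64 << shift);       /* -256 for shift 2, -512 for 3 */
        int max = 63 << shift;          /*  252 for shift 2,  504 for 3 */

        if ((offset & ((1 << shift) - 1)) || offset < min || offset > max)
                return -1;

        *field = (uint32_t)(offset >> shift) & 0x7f;
        return 0;
}

int main(void)
{
        uint32_t field;

        if (!pair_imm7(-16, 3, &field))         /* stp x29, x30, [sp, #-16]! */
                printf("imm7 = 0x%02x\n", field);       /* prints 0x7e (-2) */
        return 0;
}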
544 u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg, in aarch64_insn_gen_load_acq_store_rel() argument
559 pr_err("%s: unknown load-acquire/store-release encoding %d\n", in aarch64_insn_gen_load_acq_store_rel()
567 reg); in aarch64_insn_gen_load_acq_store_rel()
573 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, in aarch64_insn_gen_load_store_ex() argument
602 reg); in aarch64_insn_gen_load_store_ex()
796 /* We can't encode more than a 24bit value (12bit + 12bit shift) */ in aarch64_insn_gen_add_sub_imm()
797 if (imm & ~(BIT(24) - 1)) in aarch64_insn_gen_add_sub_imm()
801 if (imm & ~(SZ_4K - 1)) { in aarch64_insn_gen_add_sub_imm()
802 /* ... and in the low 12 bits -> error */ in aarch64_insn_gen_add_sub_imm()
803 if (imm & (SZ_4K - 1)) in aarch64_insn_gen_add_sub_imm()
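ADD/SUB (immediate) can shift their 12-bit field left by 12, so a 24-bit value encodes only if it lives entirely in bits [11:0] or entirely in bits [23:12]; anything straddling the boundary is rejected, which is what the SZ_4K tests above do. A sketch; split_addsub_imm() is a hypothetical name:

#include <stdint.h>

/* Split an ADD/SUB immediate into (imm12, LSL #12 flag), or fail. */
static int split_addsub_imm(uint32_t imm, uint32_t *imm12, int *lsl12)
{
        if (imm & ~((1u << 24) - 1))
                return -1;              /* wider than 12 + 12 bits */

        if (imm & ~0xfffu) {            /* bits set above bit 11 ...    */
                if (imm & 0xfffu)
                        return -1;      /* ... and below: not encodable */
                *imm12 = imm >> 12;
                *lsl12 = 1;
        } else {
                *imm12 = imm;
                *lsl12 = 0;
        }
        return 0;
}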
877 int imm, int shift, in aarch64_insn_gen_movewide() argument
898 if (imm & ~(SZ_64K - 1)) { in aarch64_insn_gen_movewide()
905 if (shift != 0 && shift != 16) { in aarch64_insn_gen_movewide()
906 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_movewide()
907 shift); in aarch64_insn_gen_movewide()
913 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) { in aarch64_insn_gen_movewide()
914 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_movewide()
915 shift); in aarch64_insn_gen_movewide()
924 insn |= (shift >> 4) << 21; in aarch64_insn_gen_movewide()
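The move-wide group (MOVZ/MOVK/MOVN) takes a 16-bit immediate plus a 2-bit hw field at bits [22:21] selecting which 16-bit lane it lands in; hw is shift/16, which is exactly what "(shift >> 4) << 21" computes once the checks above have validated the shift. A sketch for 64-bit MOVZ; gen_movz_x() is illustrative, with 0xd2800000 assumed as the base opcode:

#include <stdint.h>
#include <stdio.h>

/* MOVZ Xd, #imm16, LSL #shift (shift in {0, 16, 32, 48}). */
static uint32_t gen_movz_x(unsigned int rd, uint16_t imm16, unsigned int shift)
{
        uint32_t insn = 0xd2800000u;    /* MOVZ, 64-bit variant */

        insn |= (shift >> 4) << 21;     /* hw = shift / 16, bits [22:21] */
        insn |= (uint32_t)imm16 << 5;   /* imm16 at bits [20:5] */
        insn |= rd & 0x1f;              /* Rd at bits [4:0] */

        return insn;
}

int main(void)
{
        /* movz x0, #0x1234, lsl #16 */
        printf("0x%08x\n", gen_movz_x(0, 0x1234, 16));  /* 0xd2a24680 */
        return 0;
}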
933 enum aarch64_insn_register reg, in aarch64_insn_gen_add_sub_shifted_reg() argument
934 int shift, in aarch64_insn_gen_add_sub_shifted_reg() argument
960 if (shift & ~(SZ_32 - 1)) { in aarch64_insn_gen_add_sub_shifted_reg()
961 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_add_sub_shifted_reg()
962 shift); in aarch64_insn_gen_add_sub_shifted_reg()
968 if (shift & ~(SZ_64 - 1)) { in aarch64_insn_gen_add_sub_shifted_reg()
969 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_add_sub_shifted_reg()
970 shift); in aarch64_insn_gen_add_sub_shifted_reg()
984 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); in aarch64_insn_gen_add_sub_shifted_reg()
986 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); in aarch64_insn_gen_add_sub_shifted_reg()
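For shifted-register ALU ops the shift amount travels in the 6-bit imm6 field and must stay below the register width; "shift & ~(SZ_32 - 1)" (or SZ_64) is a branch-free way of writing shift >= 32 (or 64) that also catches negative values. The same guard reappears in the logical shifted-register generator below. A one-liner sketch, with shift_ok() as a hypothetical name:

#include <stdbool.h>

/* Valid shift amount for a shifted-register op: below the operand width. */
static bool shift_ok(int shift, bool is64)
{
        return !(shift & ~((is64 ? 64 : 32) - 1));
}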
1034 enum aarch64_insn_register reg, in aarch64_insn_gen_data2() argument
1079 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); in aarch64_insn_gen_data2()
1127 enum aarch64_insn_register reg, in aarch64_insn_gen_logical_shifted_reg() argument
1128 int shift, in aarch64_insn_gen_logical_shifted_reg() argument
1166 if (shift & ~(SZ_32 - 1)) { in aarch64_insn_gen_logical_shifted_reg()
1167 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_logical_shifted_reg()
1168 shift); in aarch64_insn_gen_logical_shifted_reg()
1174 if (shift & ~(SZ_64 - 1)) { in aarch64_insn_gen_logical_shifted_reg()
1175 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_logical_shifted_reg()
1176 shift); in aarch64_insn_gen_logical_shifted_reg()
1190 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); in aarch64_insn_gen_logical_shifted_reg()
1192 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); in aarch64_insn_gen_logical_shifted_reg()
1209 enum aarch64_insn_register reg, in aarch64_insn_gen_adr() argument
1218 offset = addr - pc; in aarch64_insn_gen_adr()
1222 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12; in aarch64_insn_gen_adr()
1229 if (offset < -SZ_1M || offset >= SZ_1M) in aarch64_insn_gen_adr()
1232 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg); in aarch64_insn_gen_adr()
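The two addressing flavours measure from different anchors: ADR takes a byte offset from the instruction itself, range [-1M, 1M) per the check above, while ADRP counts 4K pages from the start of the page containing pc, hence the ALIGN_DOWN before the shift. A sketch of the ADRP arithmetic; adrp_offset() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K            0x1000UL
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* ADRP immediate: whole pages between pc's page and the target
 * (arithmetic right shift of a negative delta is assumed, as in the
 * kernel's own code). */
static int64_t adrp_offset(uint64_t pc, uint64_t addr)
{
        return (int64_t)(addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
}

int main(void)
{
        /* two pages forward, regardless of pc's offset within its page */
        printf("%lld\n", (long long)adrp_offset(0x400123, 0x402000));
        return 0;
}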
1365 mask = GENMASK(esz - 1, 0); in aarch64_encode_immediate()
1376 u64 emask = BIT(tmp) - 1; in aarch64_encode_immediate()
1395 * imms is set to (ones - 1), prefixed with a string of ones in aarch64_encode_immediate()
1398 imms = ones - 1; in aarch64_encode_immediate()
1400 imms &= BIT(6) - 1; in aarch64_encode_immediate()
1434 immr = (esz - ror) % esz; in aarch64_encode_immediate()
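This last function builds the N:immr:imms triplet for logical (bitmask) immediates: the value is reduced to a repeating element of esz bits containing a rotated run of `ones` set bits; imms packs (ones - 1) under a size prefix, and immr converts the rotation found during analysis into the architectural rotate-right via (esz - ror) % esz. A sketch of just that final packing, assuming the element size, popcount and rotation have already been recovered; the "0xf << ffs(esz)" prefix trick is quoted from memory of the same function, and bitmask_fields() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Pack the bitmask-immediate fields for a pattern of `ones` set bits,
 * rotated by `ror`, repeating every `esz` bits (esz in {2, 4, ..., 64}). */
static void bitmask_fields(unsigned int esz, unsigned int ones,
                           unsigned int ror, unsigned int *n,
                           unsigned int *immr, unsigned int *imms)
{
        *n    = (esz == 64);            /* N marks the 64-bit element size */
        *immr = (esz - ror) % esz;      /* rotate-right amount */
        *imms = (ones - 1) | (0xf << ffs(esz)); /* size prefix + count */
        *imms &= (1u << 6) - 1;         /* imms is a 6-bit field */
}

int main(void)
{
        unsigned int n, immr, imms;

        /* 0x00ff00ff00ff00ff: esz = 16, ones = 8, no rotation */
        bitmask_fields(16, 8, 0, &n, &immr, &imms);
        printf("N=%u immr=%u imms=0x%02x\n", n, immr, imms);   /* 0 0 0x27 */
        return 0;
}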