Lines matching "reg" and "shift"

All matches below come from the arm64 instruction encode/decode helpers (the aarch64_insn_* family in insn.c); each line carries its source line number and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
14 #include <asm/debug-monitors.h>
27 int shift; in aarch64_get_imm_shift_mask() local
31 mask = BIT(26) - 1; in aarch64_get_imm_shift_mask()
32 shift = 0; in aarch64_get_imm_shift_mask()
35 mask = BIT(19) - 1; in aarch64_get_imm_shift_mask()
36 shift = 5; in aarch64_get_imm_shift_mask()
39 mask = BIT(16) - 1; in aarch64_get_imm_shift_mask()
40 shift = 5; in aarch64_get_imm_shift_mask()
43 mask = BIT(14) - 1; in aarch64_get_imm_shift_mask()
44 shift = 5; in aarch64_get_imm_shift_mask()
47 mask = BIT(12) - 1; in aarch64_get_imm_shift_mask()
48 shift = 10; in aarch64_get_imm_shift_mask()
51 mask = BIT(9) - 1; in aarch64_get_imm_shift_mask()
52 shift = 12; in aarch64_get_imm_shift_mask()
55 mask = BIT(7) - 1; in aarch64_get_imm_shift_mask()
56 shift = 15; in aarch64_get_imm_shift_mask()
60 mask = BIT(6) - 1; in aarch64_get_imm_shift_mask()
61 shift = 10; in aarch64_get_imm_shift_mask()
64 mask = BIT(6) - 1; in aarch64_get_imm_shift_mask()
65 shift = 16; in aarch64_get_imm_shift_mask()
69 shift = 22; in aarch64_get_imm_shift_mask()
72 return -EINVAL; in aarch64_get_imm_shift_mask()
76 *shiftp = shift; in aarch64_get_imm_shift_mask()
83 #define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1)
84 #define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
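These two masks exist because the 21-bit ADR/ADRP immediate is stored split: imm[1:0] (immlo) sits at bit 29 and imm[20:2] (immhi) at bit 5. A minimal userspace sketch of reassembling it, with the kernel's constants reproduced locally and the instruction word hand-assembled as "adr x0, .+8" for illustration:

#include <stdint.h>
#include <stdio.h>

#define ADR_IMM_HILOSPLIT   2
#define ADR_IMM_SIZE        (1UL << 21)     /* SZ_2M in the kernel */
#define ADR_IMM_LOMASK      ((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK      ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT     29
#define ADR_IMM_HISHIFT     5

/* Reassemble the split ADR immediate: imm[1:0] at bit 29, imm[20:2] at bit 5. */
static uint32_t adr_decode_imm(uint32_t insn)
{
    uint32_t lo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
    uint32_t hi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;

    return (hi << ADR_IMM_HILOSPLIT) | lo;
}

int main(void)
{
    uint32_t insn = 0x10000040;     /* adr x0, .+8: immhi = 2, immlo = 0 */

    printf("imm = %u\n", adr_decode_imm(insn));     /* prints 8 */
    return 0;
}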
91 int shift; in aarch64_insn_decode_immediate() local
95 shift = 0; in aarch64_insn_decode_immediate()
99 mask = ADR_IMM_SIZE - 1; in aarch64_insn_decode_immediate()
102 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { in aarch64_insn_decode_immediate()
109 return (insn >> shift) & mask; in aarch64_insn_decode_immediate()
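Decoding an immediate is then just (insn >> shift) & mask with the pair looked up above. A minimal userspace sketch of the same pattern, with BIT() redefined to mirror the kernel macro and the instruction word 0x14000004 hand-assembled as "b .+16" for illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Extract a 'width'-bit field whose LSB sits at 'shift'. */
static uint32_t decode_field(uint32_t insn, int width, int shift)
{
    uint32_t mask = BIT(width) - 1;

    return (insn >> shift) & mask;
}

int main(void)
{
    uint32_t insn = 0x14000004;     /* b .+16: imm26 at bits [25:0] */
    uint32_t imm26 = decode_field(insn, 26, 0);

    printf("imm26 = %u, byte offset = %u\n", imm26, imm26 * 4);
    return 0;
}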
116 int shift; in aarch64_insn_encode_immediate() local
123 shift = 0; in aarch64_insn_encode_immediate()
132 if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { in aarch64_insn_encode_immediate()
140 insn &= ~(mask << shift); in aarch64_insn_encode_immediate()
141 insn |= (imm & mask) << shift; in aarch64_insn_encode_immediate()
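Encoding is the mirror image, and the order matters: clear the field first so no stale bits survive, then OR in the masked value. A sketch that retargets a hand-assembled branch by rewriting its imm26 (constants are illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Clear a 'width'-bit field at 'shift', then insert imm: the same
 * two-step pattern as aarch64_insn_encode_immediate(). */
static uint32_t encode_field(uint32_t insn, int width, int shift, uint32_t imm)
{
    uint32_t mask = BIT(width) - 1;

    insn &= ~(mask << shift);           /* clear the old field */
    insn |= (imm & mask) << shift;      /* insert the new one  */
    return insn;
}

int main(void)
{
    uint32_t b16 = 0x14000004;                  /* b .+16 */
    uint32_t b32 = encode_field(b16, 26, 0, 32 / 4);

    printf("0x%08x\n", b32);                    /* 0x14000008: b .+32 */
    return 0;
}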
149 int shift; in aarch64_insn_decode_register() local
154 shift = 0; in aarch64_insn_decode_register()
157 shift = 5; in aarch64_insn_decode_register()
161 shift = 10; in aarch64_insn_decode_register()
164 shift = 16; in aarch64_insn_decode_register()
172 return (insn >> shift) & GENMASK(4, 0); in aarch64_insn_decode_register()
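Register fields vary only in position, never in width: GENMASK(4, 0) is 0x1f, and the shift selects Rd/Rt (0), Rn (5), Rt2/Ra (10) or Rm (16). A quick userspace check against a hand-assembled "add x0, x1, x2" (0x8b020020):

#include <stdint.h>
#include <stdio.h>

/* GENMASK(4, 0) == 0x1f: every register field is five bits wide. */
#define REG_MASK 0x1f

int main(void)
{
    uint32_t insn = 0x8b020020;     /* add x0, x1, x2 */

    printf("Rd = x%u\n", insn & REG_MASK);          /* shift 0  */
    printf("Rn = x%u\n", (insn >> 5) & REG_MASK);   /* shift 5  */
    printf("Rm = x%u\n", (insn >> 16) & REG_MASK);  /* shift 16 */
    return 0;
}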
177 enum aarch64_insn_register reg) in aarch64_insn_encode_register() argument
179 int shift; in aarch64_insn_encode_register() local
184 if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) { in aarch64_insn_encode_register()
185 pr_err("%s: unknown register encoding %d\n", __func__, reg); in aarch64_insn_encode_register()
192 shift = 0; in aarch64_insn_encode_register()
195 shift = 5; in aarch64_insn_encode_register()
199 shift = 10; in aarch64_insn_encode_register()
203 shift = 16; in aarch64_insn_encode_register()
211 insn &= ~(GENMASK(4, 0) << shift); in aarch64_insn_encode_register()
212 insn |= reg << shift; in aarch64_insn_encode_register()
251 offset = ((long)addr - (long)pc); in label_imm_common()
253 if (offset < -range || offset >= range) { in label_imm_common()
268 * B/BL support [-128M, 128M) offset in aarch64_insn_gen_branch_imm()
270 * texts are within +/-128M. in aarch64_insn_gen_branch_imm()
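The range falls straight out of the encoding: B/BL carry a signed 26-bit word offset, i.e. [-2^25, 2^25) words or [-128M, 128M) bytes. A sketch of the check with made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define SZ_128M (128UL * 1024 * 1024)

/* B/BL range check: the signed imm26 counts words,
 * so [-2^25, 2^25) words == [-128M, 128M) bytes. */
static int branch_offset_ok(long offset)
{
    return offset >= -(long)SZ_128M && offset < (long)SZ_128M &&
           (offset & 3) == 0;   /* must also be word aligned */
}

int main(void)
{
    unsigned long pc   = 0xffff800010000000UL;  /* illustrative addresses */
    unsigned long addr = 0xffff800010200000UL;
    long offset = (long)addr - (long)pc;

    printf("offset %ld %s\n", offset,
           branch_offset_ok(offset) ? "encodable" : "out of range");
    return 0;
}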
293 enum aarch64_insn_register reg, in aarch64_insn_gen_comp_branch_imm() argument
327 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_comp_branch_imm()
353 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, in aarch64_insn_gen_branch_reg() argument
373 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); in aarch64_insn_gen_branch_reg()
376 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, in aarch64_insn_gen_load_store_reg() argument
401 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_load_store_reg()
410 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg, in aarch64_insn_gen_load_store_imm() argument
417 u32 shift; in aarch64_insn_gen_load_store_imm() local
424 shift = aarch64_insn_ldst_size[size]; in aarch64_insn_gen_load_store_imm()
425 if (imm & ~(BIT(12 + shift) - BIT(shift))) { in aarch64_insn_gen_load_store_imm()
430 imm >>= shift; in aarch64_insn_gen_load_store_imm()
449 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_load_store_imm()
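The BIT(12 + shift) - BIT(shift) mask above covers exactly the twelve bits that survive scaling by the access size, so the single test rejects both oversized and misaligned offsets. For 64-bit accesses (shift = 3) that means multiples of 8 up to 32760, as this sketch shows:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

/* LDR/STR (unsigned offset): imm12 is scaled by the access size,
 * so a valid offset is a multiple of (1 << shift) below (4096 << shift). */
static int ldst_imm_ok(unsigned long imm, int shift)
{
    return !(imm & ~(BIT(12 + shift) - BIT(shift)));
}

int main(void)
{
    /* 64-bit access: shift = 3 */
    printf("%d\n", ldst_imm_ok(32760, 3));  /* 1: max encodable  */
    printf("%d\n", ldst_imm_ok(32768, 3));  /* 0: needs 13 bits  */
    printf("%d\n", ldst_imm_ok(12, 3));     /* 0: not 8-aligned  */
    return 0;
}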
458 enum aarch64_insn_register reg, in aarch64_insn_gen_load_literal() argument
473 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); in aarch64_insn_gen_load_literal()
487 int shift; in aarch64_insn_gen_load_store_pair() local
509 if ((offset & 0x3) || (offset < -256) || (offset > 252)) { in aarch64_insn_gen_load_store_pair()
510 pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n", in aarch64_insn_gen_load_store_pair()
514 shift = 2; in aarch64_insn_gen_load_store_pair()
517 if ((offset & 0x7) || (offset < -512) || (offset > 504)) { in aarch64_insn_gen_load_store_pair()
518 pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n", in aarch64_insn_gen_load_store_pair()
522 shift = 3; in aarch64_insn_gen_load_store_pair()
540 offset >> shift); in aarch64_insn_gen_load_store_pair()
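LDP/STP carries a signed 7-bit immediate scaled by the register size, which is where the two ranges above come from: multiples of 4 in [-256, 252] for 32-bit registers, multiples of 8 in [-512, 504] for 64-bit ones. A sketch of the 64-bit case (the -16 frame-record offset is just an example; like the kernel, it assumes arithmetic right shift of negative values):

#include <stdio.h>

/* LDP/STP (64-bit registers): signed imm7 scaled by 8,
 * so offsets are multiples of 8 in [-512, 504]. */
static int ldp_encode_imm7(int offset, unsigned *imm7)
{
    if ((offset & 0x7) || offset < -512 || offset > 504)
        return -1;
    *imm7 = (offset >> 3) & 0x7f;   /* two's complement, 7 bits */
    return 0;
}

int main(void)
{
    unsigned imm7;

    if (!ldp_encode_imm7(-16, &imm7))   /* e.g. stp x29, x30, [sp, #-16]! */
        printf("imm7 = 0x%02x\n", imm7);    /* 0x7e */
    return 0;
}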
543 u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg, in aarch64_insn_gen_load_store_ex() argument
572 reg); in aarch64_insn_gen_load_store_ex()
766 /* We can't encode more than a 24bit value (12bit + 12bit shift) */ in aarch64_insn_gen_add_sub_imm()
767 if (imm & ~(BIT(24) - 1)) in aarch64_insn_gen_add_sub_imm()
771 if (imm & ~(SZ_4K - 1)) { in aarch64_insn_gen_add_sub_imm()
772 /* ... and in the low 12 bits -> error */ in aarch64_insn_gen_add_sub_imm()
773 if (imm & (SZ_4K - 1)) in aarch64_insn_gen_add_sub_imm()
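ADD/SUB (immediate) accepts imm12 optionally shifted left by 12, so one instruction can materialize any 24-bit value in which only one 12-bit half is populated; that is exactly what the two tests above enforce. A sketch of the split (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))
#define SZ_4K  0x1000

/* Split a value into the imm12/LSL#12 form, or fail if both halves are used. */
static int add_sub_split_imm(unsigned long imm, unsigned *imm12, int *lsl12)
{
    if (imm & ~(BIT(24) - 1))
        return -1;                  /* more than 24 bits */

    if (imm & ~(SZ_4K - 1)) {       /* high half used...   */
        if (imm & (SZ_4K - 1))
            return -1;              /* ...so low must be 0 */
        *imm12 = imm >> 12;
        *lsl12 = 1;
    } else {
        *imm12 = imm;
        *lsl12 = 0;
    }
    return 0;
}

int main(void)
{
    unsigned imm12;
    int lsl12;

    if (!add_sub_split_imm(0x5000, &imm12, &lsl12))
        printf("imm12 = %u, lsl12 = %d\n", imm12, lsl12);   /* 5, 1 */
    return 0;
}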
847 int imm, int shift, in aarch64_insn_gen_movewide() argument
868 if (imm & ~(SZ_64K - 1)) { in aarch64_insn_gen_movewide()
875 if (shift != 0 && shift != 16) { in aarch64_insn_gen_movewide()
876 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_movewide()
877 shift); in aarch64_insn_gen_movewide()
883 if (shift != 0 && shift != 16 && shift != 32 && shift != 48) { in aarch64_insn_gen_movewide()
884 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_movewide()
885 shift); in aarch64_insn_gen_movewide()
894 insn |= (shift >> 4) << 21; in aarch64_insn_gen_movewide()
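Dividing the shift by 16 (shift >> 4) yields the two-bit hw field at bits [22:21]; the checks above guarantee the shift is one of 0/16 for 32-bit or 0/16/32/48 for 64-bit destinations. A tiny sketch:

#include <stdio.h>

/* MOVZ/MOVK/MOVN encode the shift as hw = shift / 16 in bits [22:21]. */
static int movewide_hw(int shift, int is64)
{
    int max = is64 ? 48 : 16;

    if (shift < 0 || shift > max || (shift & 15))
        return -1;
    return shift >> 4;      /* 0, 16, 32, 48 -> 0..3 */
}

int main(void)
{
    printf("hw = %d\n", movewide_hw(32, 1));    /* 2 */
    printf("hw = %d\n", movewide_hw(32, 0));    /* -1: invalid for w regs */
    return 0;
}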
903 enum aarch64_insn_register reg, in aarch64_insn_gen_add_sub_shifted_reg() argument
904 int shift, in aarch64_insn_gen_add_sub_shifted_reg() argument
930 if (shift & ~(SZ_32 - 1)) { in aarch64_insn_gen_add_sub_shifted_reg()
931 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_add_sub_shifted_reg()
932 shift); in aarch64_insn_gen_add_sub_shifted_reg()
938 if (shift & ~(SZ_64 - 1)) { in aarch64_insn_gen_add_sub_shifted_reg()
939 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_add_sub_shifted_reg()
940 shift); in aarch64_insn_gen_add_sub_shifted_reg()
954 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); in aarch64_insn_gen_add_sub_shifted_reg()
956 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); in aarch64_insn_gen_add_sub_shifted_reg()
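Note the validity test is a mask rather than a comparison: shift & ~(width - 1) is nonzero exactly when the shift amount cannot fit below the register width (widths being powers of two), and it also catches negative values. Illustrated:

#include <stdio.h>

#define SZ_32 32
#define SZ_64 64

/* shift & ~(width - 1) != 0 iff shift >= width, for power-of-2 widths. */
static int shift_ok(int shift, int width)
{
    return !(shift & ~(width - 1));
}

int main(void)
{
    printf("%d %d\n", shift_ok(31, SZ_32), shift_ok(32, SZ_32));    /* 1 0 */
    return 0;
}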
1004 enum aarch64_insn_register reg, in aarch64_insn_gen_data2() argument
1049 return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); in aarch64_insn_gen_data2()
1097 enum aarch64_insn_register reg, in aarch64_insn_gen_logical_shifted_reg() argument
1098 int shift, in aarch64_insn_gen_logical_shifted_reg() argument
1136 if (shift & ~(SZ_32 - 1)) { in aarch64_insn_gen_logical_shifted_reg()
1137 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_logical_shifted_reg()
1138 shift); in aarch64_insn_gen_logical_shifted_reg()
1144 if (shift & ~(SZ_64 - 1)) { in aarch64_insn_gen_logical_shifted_reg()
1145 pr_err("%s: invalid shift encoding %d\n", __func__, in aarch64_insn_gen_logical_shifted_reg()
1146 shift); in aarch64_insn_gen_logical_shifted_reg()
1160 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg); in aarch64_insn_gen_logical_shifted_reg()
1162 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); in aarch64_insn_gen_logical_shifted_reg()
1179 enum aarch64_insn_register reg, in aarch64_insn_gen_adr() argument
1188 offset = addr - pc; in aarch64_insn_gen_adr()
1192 offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12; in aarch64_insn_gen_adr()
1199 if (offset < -SZ_1M || offset >= SZ_1M) in aarch64_insn_gen_adr()
1202 insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg); in aarch64_insn_gen_adr()
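ADR measures its offset in bytes from pc, ADRP in 4K pages from the page containing pc (hence the ALIGN_DOWN); in either unit the signed 21-bit immediate must land in [-1M, 1M). A worked sketch with illustrative addresses:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 0x1000UL
#define SZ_1M 0x100000L
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
    unsigned long pc   = 0xffff800010001234UL;  /* illustrative */
    unsigned long addr = 0xffff800010345000UL;

    /* ADRP counts 4K pages from the page containing pc. */
    long adrp_imm = (long)(addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;

    printf("page offset = %ld (%s)\n", adrp_imm,
           (adrp_imm >= -SZ_1M && adrp_imm < SZ_1M) ? "ok" : "too far");
    return 0;
}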
1335 mask = GENMASK(esz - 1, 0); in aarch64_encode_immediate()
1346 u64 emask = BIT(tmp) - 1; in aarch64_encode_immediate()
1365 * imms is set to (ones - 1), prefixed with a string of ones in aarch64_encode_immediate()
1368 imms = ones - 1; in aarch64_encode_immediate()
1370 imms &= BIT(6) - 1; in aarch64_encode_immediate()
1404 immr = (esz - ror) % esz; in aarch64_encode_immediate()
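The imms/immr arithmetic at the end is easiest to sanity-check by running the inverse direction, the architecture's DecodeBitMasks() expansion. The sketch below is that decoder, not the kernel's encoder, and assumes the fields already form a valid logical immediate; for the pattern 0x00ff00ff00ff00ff the steps above give esz = 16, ones = 8, ror = 0, hence imms = 0b100111 and immr = 0:

#include <stdint.h>
#include <stdio.h>

/* Expand (n, immr, imms) back into the 64-bit bitmask it denotes.
 * Minimal sketch: assumes the fields form a valid logical immediate. */
static uint64_t decode_bit_masks(unsigned n, unsigned immr, unsigned imms)
{
    /* Element size: highest set bit of the 7-bit value n:NOT(imms). */
    unsigned len = 31 - __builtin_clz((n << 6) | (~imms & 0x3f));
    unsigned esz = 1U << len;
    unsigned ones = (imms & (esz - 1)) + 1;
    unsigned r = immr & (esz - 1);
    unsigned i;

    /* Build the run of ones, rotate it right by immr within the element. */
    uint64_t welem = (ones == 64) ? ~0ULL : (1ULL << ones) - 1;
    uint64_t elem = (welem >> r) | (welem << ((esz - r) & (esz - 1)));
    uint64_t mask;

    elem &= (esz == 64) ? ~0ULL : (1ULL << esz) - 1;

    /* Replicate the element across all 64 bits. */
    mask = elem;
    for (i = esz; i < 64; i *= 2)
        mask |= mask << i;
    return mask;
}

int main(void)
{
    /* n=0, immr=0, imms=0b100111 should give 0x00ff00ff00ff00ff. */
    printf("%016llx\n",
           (unsigned long long)decode_bit_masks(0, 0, 0x27));
    return 0;
}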