There is a common helper sign_extend64() in include/linux/bitops.h that
sign-extends a 64-bit value using a specified bit as the sign bit. It
is more efficient, so let us use it and remove the arch-specific
sign_extend() under arch/loongarch.

Suggested-by: Jinyang He <hejinyang@loongson.cn>
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
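Not for inclusion in the commit message: for reviewers unfamiliar with
the helper, below is a stand-alone user-space sketch of the shift-based
sign extension that sign_extend64() in include/linux/bitops.h relies on,
applied to the 28-bit branch offset case where bit 27 is the sign bit.
The sext64() name and the demo program are illustrative only, not
kernel code.

/*
 * Illustrative only: sext64() mirrors the shift trick used by
 * sign_extend64(), treating bit "index" as the sign bit.
 */
#include <stdint.h>
#include <stdio.h>

static inline int64_t sext64(uint64_t value, int index)
{
        uint8_t shift = 63 - index;

        /* move the sign bit up to bit 63, then arithmetic-shift back */
        return (int64_t)(value << shift) >> shift;
}

int main(void)
{
        /* 28-bit branch offset with bit 27 set -> negative displacement */
        uint64_t imm = 0x8000000;

        printf("%lld\n", (long long)sext64(imm, 27)); /* prints -134217728 */
        return 0;
}

For a value with bit 27 set, this gives the same result as the old
mask-based sign_extend() on 64-bit LoongArch, where unsigned long is
64 bits wide; sign_extend64() just expresses it with a single pair of
shifts.
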
arch/loongarch/include/asm/inst.h | 8 --------
arch/loongarch/kernel/alternative.c | 6 +++---
2 files changed, 3 insertions(+), 11 deletions(-)
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index e0771d2..ba18ce8 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -397,14 +397,6 @@ static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
return val < (1UL << bit);
}

-static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
-{
- if (!is_imm_negative(val, idx + 1))
- return ((1UL << idx) - 1) & val;
- else
- return ~((1UL << idx) - 1) | val;
-}
-
#define DEF_EMIT_REG0I26_FORMAT(NAME, OP) \
static inline void emit_##NAME(union loongarch_instruction *insn, \
int offset) \
diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
index c5aebea..4ad1384 100644
--- a/arch/loongarch/kernel/alternative.c
+++ b/arch/loongarch/kernel/alternative.c
@@ -74,7 +74,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
switch (src->reg0i26_format.opcode) {
case b_op:
case bl_op:
- jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27);
+ jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
@@ -93,7 +93,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
fallthrough;
case beqz_op:
case bnez_op:
- jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22);
+ jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
@@ -112,7 +112,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
case bge_op:
case bltu_op:
case bgeu_op:
- jump_addr = cur_pc + sign_extend(si << 2, 17);
+ jump_addr = cur_pc + sign_extend64(si << 2, 17);
if (in_alt_jump(jump_addr, start, end))
return;
offset = jump_addr - pc;
--
2.1.0
I queued this patch, but others need more reviews.

Huacai

On Sat, Jan 14, 2023 at 5:27 PM Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
> [quoted patch trimmed]