From: "Catherine A. Frederick" <mptcultist@floorchan.org>
Signed-off-by: Catherine A. Frederick <mptcultist@floorchan.org>
---
tcg/ppc/tcg-target.inc.c | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 7da67086c6..3cab56fe91 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -790,21 +790,25 @@ static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
+ tcg_debug_assert((c < 32) && (c >= 0));
tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}
static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
+ tcg_debug_assert((c < 64) && (c >= 0));
tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}
static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
+ tcg_debug_assert((c < 32) && (c >= 0));
tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}
static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
+ tcg_debug_assert((c < 64) && (c >= 0));
tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
@@ -2610,21 +2614,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_shl_i32:
if (const_args[2]) {
- tcg_out_shli32(s, args[0], args[1], args[2]);
+ /* Limit shift immediate to prevent illegal instruction
+ from bitmask corruption */
+ tcg_out_shli32(s, args[0], args[1], args[2] & 31);
} else {
tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_shr_i32:
if (const_args[2]) {
- tcg_out_shri32(s, args[0], args[1], args[2]);
+ /* Both use RLWINM, which has a 5 bit field for the
+ shift mask. */
+ tcg_out_shri32(s, args[0], args[1], args[2] & 31);
} else {
tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_sar_i32:
if (const_args[2]) {
- tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
+ /* SRAWI has a 5 bit sized field for the shift mask
+ as well. */
+ tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
} else {
tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
}
@@ -2696,21 +2706,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
case INDEX_op_shl_i64:
if (const_args[2]) {
- tcg_out_shli64(s, args[0], args[1], args[2]);
+ /* Limit shift immediate to prevent illegal instruction
+ from bitmask corruption */
+ tcg_out_shli64(s, args[0], args[1], args[2] & 63);
} else {
tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_shr_i64:
if (const_args[2]) {
- tcg_out_shri64(s, args[0], args[1], args[2]);
+ /* Same applies here, as both RLDICL and RLDICR have a
+ 6-bit field for the shift value */
+ tcg_out_shri64(s, args[0], args[1], args[2] & 63);
} else {
tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
}
break;
case INDEX_op_sar_i64:
if (const_args[2]) {
- int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
+ /* Same for SRADI, except there's no function
+ to call into. */
+ int sh = SH(((args[2] & 63) & 0x1f) | ((((args[2] & 63) >> 5) & 1) << 1));
tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
} else {
tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
--
2.26.2
Oh dear, I did it to myself again. On Wed, Jun 3, 2020 at 7:13 PM <agrecascino123@gmail.com> wrote: > > From: "Catherine A. Frederick" <mptcultist@floorchan.org> > > Signed-off-by: Catherine A. Frederick <mptcultist@floorchan.org> > --- > tcg/ppc/tcg-target.inc.c | 28 ++++++++++++++++++++++------ > 1 file changed, 22 insertions(+), 6 deletions(-) > > diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c > index 7da67086c6..3cab56fe91 100644 > --- a/tcg/ppc/tcg-target.inc.c > +++ b/tcg/ppc/tcg-target.inc.c > @@ -790,21 +790,25 @@ static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src) > > static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c) > { > + tcg_debug_assert((c < 32) && (c >= 0)); > tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c); > } > > static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c) > { > + tcg_debug_assert((c < 64) && (c >= 0)); > tcg_out_rld(s, RLDICR, dst, src, c, 63 - c); > } > > static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c) > { > + tcg_debug_assert((c < 32) && (c >= 0)); > tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31); > } > > static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c) > { > + tcg_debug_assert((c < 64) && (c >= 0)); > tcg_out_rld(s, RLDICL, dst, src, 64 - c, c); > } > > @@ -2610,21 +2614,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, > > case INDEX_op_shl_i32: > if (const_args[2]) { > - tcg_out_shli32(s, args[0], args[1], args[2]); > + /* Limit shift immediate to prevent illegal instruction > + from bitmask corruption */ > + tcg_out_shli32(s, args[0], args[1], args[2] & 31); > } else { > tcg_out32(s, SLW | SAB(args[1], args[0], args[2])); > } > break; > case INDEX_op_shr_i32: > if (const_args[2]) { > - tcg_out_shri32(s, args[0], args[1], args[2]); > + /* Both use RLWINM, which has a 5 bit field for the > + shift mask. 
*/ > + tcg_out_shri32(s, args[0], args[1], args[2] & 31); > } else { > tcg_out32(s, SRW | SAB(args[1], args[0], args[2])); > } > break; > case INDEX_op_sar_i32: > if (const_args[2]) { > - tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2])); > + /* SRAWI has a 5 bit sized field for the shift mask > + as well. */ > + tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31)); > } else { > tcg_out32(s, SRAW | SAB(args[1], args[0], args[2])); > } > @@ -2696,21 +2706,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, > > case INDEX_op_shl_i64: > if (const_args[2]) { > - tcg_out_shli64(s, args[0], args[1], args[2]); > + /* Limit shift immediate to prevent illegal instruction from > + from bitmask corruption */ > + tcg_out_shli64(s, args[0], args[1], args[2] & 63); > } else { > tcg_out32(s, SLD | SAB(args[1], args[0], args[2])); > } > break; > case INDEX_op_shr_i64: > if (const_args[2]) { > - tcg_out_shri64(s, args[0], args[1], args[2]); > + /* Same applies here, as both RLDICL, and RLDICR have a > + 6 bit large mask for the shift value */ > + tcg_out_shri64(s, args[0], args[1], args[2] & 63); > } else { > tcg_out32(s, SRD | SAB(args[1], args[0], args[2])); > } > break; > case INDEX_op_sar_i64: > if (const_args[2]) { > - int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1); > + /* Same for SRADI, except there's no function > + to call into. */ > + int sh = SH(((args[2] & 63) & 0x1f) | ((((args[2] & 63) >> 5) & 1) << 1)); > tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh); > } else { > tcg_out32(s, SRAD | SAB(args[1], args[0], args[2])); > -- > 2.26.2 >
© 2016 - 2024 Red Hat, Inc.