[PATCH v3] tcg: Sanitize shift constants on ppc64le so that shift operations with large constants don't generate invalid instructions.

agrecascino123@gmail.com posted 1 patch 3 years, 11 months ago
Test docker-mingw@fedora passed
Test checkpatch passed
Test asan passed
Test docker-quick@centos7 passed
Test FreeBSD passed
Patches applied successfully (tree, apply log)
git fetch https://github.com/patchew-project/qemu tags/patchew/20200603231330.5819-1-agrecascino123@gmail.com
Maintainers: Richard Henderson <rth@twiddle.net>
There is a newer version of this series
tcg/ppc/tcg-target.inc.c | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
[PATCH v3] tcg: Sanitize shift constants on ppc64le so that shift operations with large constants don't generate invalid instructions.
Posted by agrecascino123@gmail.com 3 years, 11 months ago
From: "Catherine A. Frederick" <mptcultist@floorchan.org>

Signed-off-by: Catherine A. Frederick <mptcultist@floorchan.org>
---
 tcg/ppc/tcg-target.inc.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 7da67086c6..3cab56fe91 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -790,21 +790,25 @@ static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
 
 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
+    tcg_debug_assert((c < 32) && (c >= 0));
     tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
 }
 
 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
+    tcg_debug_assert((c < 64) && (c >= 0));
     tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
 }
 
 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
+    tcg_debug_assert((c < 32) && (c >= 0));
     tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
 }
 
 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
 {
+    tcg_debug_assert((c < 64) && (c >= 0));
     tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
 }
 
@@ -2610,21 +2614,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 
     case INDEX_op_shl_i32:
         if (const_args[2]) {
-            tcg_out_shli32(s, args[0], args[1], args[2]);
+            /* Limit shift immediate to prevent illegal instruction
+               from bitmask corruption */
+            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
         } else {
             tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_shr_i32:
         if (const_args[2]) {
-            tcg_out_shri32(s, args[0], args[1], args[2]);
+            /* Both use RLWINM, which has a 5 bit field for the
+               shift mask. */
+            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
         } else {
             tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_sar_i32:
         if (const_args[2]) {
-            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
+            /* SRAWI has a 5 bit sized field for the shift mask
+               as well. */
+            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
         } else {
             tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
         }
@@ -2696,21 +2706,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
 
     case INDEX_op_shl_i64:
         if (const_args[2]) {
-            tcg_out_shli64(s, args[0], args[1], args[2]);
+            /* Limit shift immediate to prevent illegal instruction
+               from bitmask corruption */
+            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
         } else {
             tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_shr_i64:
         if (const_args[2]) {
-            tcg_out_shri64(s, args[0], args[1], args[2]);
+            /* Same applies here, as both RLDICL and RLDICR have a
+               6-bit field for the shift value */
+            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
         } else {
             tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
         }
         break;
     case INDEX_op_sar_i64:
         if (const_args[2]) {
-            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
+            /* Same for SRADI, except there's no function
+               to call into. */
+            int sh = SH(((args[2] & 63) & 0x1f) | ((((args[2] & 63) >> 5) & 1) << 1));
             tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
         } else {
             tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
-- 
2.26.2


Re: [PATCH v3] tcg: Sanitize shift constants on ppc64le so that shift operations with large constants don't generate invalid instructions.
Posted by Catherine A. Frederick / mptcultist 3 years, 11 months ago
Oh dear, I did it to myself again.

On Wed, Jun 3, 2020 at 7:13 PM <agrecascino123@gmail.com> wrote:
>
> From: "Catherine A. Frederick" <mptcultist@floorchan.org>
>
> Signed-off-by: Catherine A. Frederick <mptcultist@floorchan.org>
> ---
>  tcg/ppc/tcg-target.inc.c | 28 ++++++++++++++++++++++------
>  1 file changed, 22 insertions(+), 6 deletions(-)
>
> diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
> index 7da67086c6..3cab56fe91 100644
> --- a/tcg/ppc/tcg-target.inc.c
> +++ b/tcg/ppc/tcg-target.inc.c
> @@ -790,21 +790,25 @@ static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
>
>  static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
>  {
> +    tcg_debug_assert((c < 32) && (c >= 0));
>      tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
>  }
>
>  static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
>  {
> +    tcg_debug_assert((c < 64) && (c >= 0));
>      tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
>  }
>
>  static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
>  {
> +    tcg_debug_assert((c < 32) && (c >= 0));
>      tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
>  }
>
>  static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
>  {
> +    tcg_debug_assert((c < 64) && (c >= 0));
>      tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
>  }
>
> @@ -2610,21 +2614,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>
>      case INDEX_op_shl_i32:
>          if (const_args[2]) {
> -            tcg_out_shli32(s, args[0], args[1], args[2]);
> +            /* Limit shift immediate to prevent illegal instruction
> +               from bitmask corruption */
> +            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
>          } else {
>              tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
>          }
>          break;
>      case INDEX_op_shr_i32:
>          if (const_args[2]) {
> -            tcg_out_shri32(s, args[0], args[1], args[2]);
> +            /* Both use RLWINM, which has a 5 bit field for the
> +               shift mask. */
> +            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
>          } else {
>              tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
>          }
>          break;
>      case INDEX_op_sar_i32:
>          if (const_args[2]) {
> -            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
> +            /* SRAWI has a 5 bit sized field for the shift mask
> +               as well. */
> +            tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2] & 31));
>          } else {
>              tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
>          }
> @@ -2696,21 +2706,27 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
>
>      case INDEX_op_shl_i64:
>          if (const_args[2]) {
> -            tcg_out_shli64(s, args[0], args[1], args[2]);
> +            /* Limit shift immediate to prevent illegal instruction
> +               from bitmask corruption */
> +            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
>          } else {
>              tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
>          }
>          break;
>      case INDEX_op_shr_i64:
>          if (const_args[2]) {
> -            tcg_out_shri64(s, args[0], args[1], args[2]);
> +            /* Same applies here, as both RLDICL and RLDICR have a
> +               6-bit field for the shift value */
> +            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
>          } else {
>              tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
>          }
>          break;
>      case INDEX_op_sar_i64:
>          if (const_args[2]) {
> -            int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
> +            /* Same for SRADI, except there's no function
> +               to call into. */
> +            int sh = SH(((args[2] & 63) & 0x1f) | ((((args[2] & 63) >> 5) & 1) << 1));
>              tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
>          } else {
>              tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
> --
> 2.26.2
>