[RFC v3 17/21] hw/arm/smmuv3: Pass security state to command queue and IRQ logic

Tao Tang posted 21 patches 4 months ago
Maintainers: Eric Auger <eric.auger@redhat.com>, Peter Maydell <peter.maydell@linaro.org>
[RFC v3 17/21] hw/arm/smmuv3: Pass security state to command queue and IRQ logic
Posted by Tao Tang 4 months ago
The command queue and interrupt logic must operate within the correct
security context. Handling a command queue in one security state can
have side effects, such as interrupts or errors, that need to be
processed in another. This requires the IRQ and GERROR logic to be
fully aware of the multi-security-state environment.

This patch refactors the command queue processing and interrupt handling
to be security-state aware. Besides, unlike the command queue, the
event queue logic was already updated to be security-state aware in a
previous change. The SMMUSecSID is now passed through the relevant
functions to ensure that:

- Command queue operations are performed on the correct register bank.

- Interrupts are triggered and checked against the correct security
state's configuration.

- Errors from command processing are reported in the correct GERROR
register bank.

- Architectural access controls, like preventing secure commands from a
non-secure queue, are correctly enforced.

- As Secure Stage 2 is not yet implemented, commands that target it
are now correctly aborted during command queue processing.

Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
---
 hw/arm/smmuv3.c     | 61 +++++++++++++++++++++++++++++++--------------
 hw/arm/trace-events |  2 +-
 2 files changed, 43 insertions(+), 20 deletions(-)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 432de88610..4ac7a2f3c7 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -46,11 +46,11 @@
  *
  * @irq: irq type
  * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
+ * @sec_sid: SEC_SID of the bank
  */
 static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
-                               uint32_t gerror_mask)
+                               uint32_t gerror_mask, SMMUSecSID sec_sid)
 {
-    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
     SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
 
     bool pulse = false;
@@ -87,9 +87,9 @@ static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
     }
 }
 
-static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
+static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn,
+                                 SMMUSecSID sec_sid)
 {
-    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
     SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
     uint32_t pending = bank->gerror ^ bank->gerrorn;
     uint32_t toggled = bank->gerrorn ^ new_gerrorn;
@@ -109,7 +109,7 @@ static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
     trace_smmuv3_write_gerrorn(toggled & pending, bank->gerrorn);
 }
 
-static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
+static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd, SMMUSecSID sec_sid)
 {
     dma_addr_t addr = Q_CONS_ENTRY(q);
     MemTxResult ret;
@@ -167,7 +167,7 @@ static MemTxResult smmuv3_write_eventq(SMMUv3State *s, SMMUSecSID sec_sid,
     }
 
     if (!smmuv3_q_empty(q)) {
-        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
+        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0, sec_sid);
     }
     return MEMTX_OK;
 }
@@ -263,7 +263,8 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
                               info->sid);
     r = smmuv3_write_eventq(s, sec_sid, &evt);
     if (r != MEMTX_OK) {
-        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
+        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR,
+                           R_GERROR_EVENTQ_ABT_ERR_MASK, sec_sid);
     }
     info->recorded = true;
 }
@@ -1457,11 +1458,10 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s, SMMUSecSID sec_sid)
     return smmu_irq_ctl_evtq_irqen_disabled(s, sec_sid);
 }
 
-static int smmuv3_cmdq_consume(SMMUv3State *s)
+static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid)
 {
     SMMUState *bs = ARM_SMMU(s);
     SMMUCmdError cmd_error = SMMU_CERROR_NONE;
-    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
     SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
     SMMUQueue *q = &bank->cmdq;
     SMMUCommandType type = 0;
@@ -1480,14 +1480,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         uint32_t pending = bank->gerror ^ bank->gerrorn;
         Cmd cmd;
 
-        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
+        trace_smmuv3_cmdq_consume(sec_sid, Q_PROD(q), Q_CONS(q),
                                   Q_PROD_WRAP(q), Q_CONS_WRAP(q));
 
         if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
             break;
         }
 
-        if (queue_read(q, &cmd) != MEMTX_OK) {
+        if (queue_read(q, &cmd, sec_sid) != MEMTX_OK) {
             cmd_error = SMMU_CERROR_ABT;
             break;
         }
@@ -1500,7 +1500,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
         switch (type) {
         case SMMU_CMD_SYNC:
             if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
-                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
+                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0, sec_sid);
             }
             break;
         case SMMU_CMD_PREFETCH_CONFIG:
@@ -1512,6 +1512,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             SMMUDevice *sdev = smmu_find_sdev(bs, sid);
 
             if (CMD_SSEC(&cmd)) {
+                if (sec_sid != SMMU_SEC_SID_S) {
+                    /* Secure Stream with Non-Secure command */
+                    cmd_error = SMMU_CERROR_ILL;
+                    break;
+                }
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
@@ -1532,6 +1537,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             SMMUSIDRange sid_range;
 
             if (CMD_SSEC(&cmd)) {
+                if (sec_sid != SMMU_SEC_SID_S) {
+                    cmd_error = SMMU_CERROR_ILL;
+                    break;
+                }
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
@@ -1551,6 +1560,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
             SMMUDevice *sdev = smmu_find_sdev(bs, sid);
 
             if (CMD_SSEC(&cmd)) {
+                if (sec_sid != SMMU_SEC_SID_S) {
+                    cmd_error = SMMU_CERROR_ILL;
+                    break;
+                }
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
@@ -1618,7 +1631,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
-            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, SMMU_SEC_SID_NS);
+            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, sec_sid);
             break;
         case SMMU_CMD_TLBI_S12_VMALL:
         {
@@ -1628,6 +1641,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
+            /* Secure Stage 2 isn't supported for now */
+            if (sec_sid != SMMU_SEC_SID_NS) {
+                cmd_error = SMMU_CERROR_ABT;
+                break;
+            }
 
             trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
             smmu_inv_notifiers_all(&s->smmu_state);
@@ -1639,11 +1657,16 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
+
+            if (sec_sid != SMMU_SEC_SID_NS) {
+                cmd_error = SMMU_CERROR_ABT;
+                break;
+            }
             /*
              * As currently only either s1 or s2 are supported
              * we can reuse same function for s2.
              */
-            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, SMMU_SEC_SID_NS);
+            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, sec_sid);
             break;
         case SMMU_CMD_TLBI_EL3_ALL:
         case SMMU_CMD_TLBI_EL3_VA:
@@ -1680,7 +1703,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
     if (cmd_error) {
         trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
         smmu_write_cmdq_err(s, cmd_error, sec_sid);
-        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
+        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK, sec_sid);
     }
 
     trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
@@ -1772,7 +1795,7 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
         bank->cr[0] = data;
         bank->cr0ack = data & ~SMMU_CR0_RESERVED;
         /* in case the command queue has been enabled */
-        smmuv3_cmdq_consume(s);
+        smmuv3_cmdq_consume(s, reg_sec_sid);
         return MEMTX_OK;
     case A_CR1:
         bank->cr[1] = data;
@@ -1792,12 +1815,12 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
         bank->irq_ctrl = data;
         return MEMTX_OK;
     case A_GERRORN:
-        smmuv3_write_gerrorn(s, data);
+        smmuv3_write_gerrorn(s, data, reg_sec_sid);
         /*
          * By acknowledging the CMDQ_ERR, SW may notify cmds can
          * be processed again
          */
-        smmuv3_cmdq_consume(s);
+        smmuv3_cmdq_consume(s, reg_sec_sid);
         return MEMTX_OK;
     case A_GERROR_IRQ_CFG0: /* 64b */
         if (!smmu_gerror_irq_cfg_writable(s, reg_sec_sid)) {
@@ -1899,7 +1922,7 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
         return MEMTX_OK;
     case A_CMDQ_PROD:
         bank->cmdq.prod = data;
-        smmuv3_cmdq_consume(s);
+        smmuv3_cmdq_consume(s, reg_sec_sid);
         return MEMTX_OK;
     case A_CMDQ_CONS:
         if (!smmu_cmdqen_disabled(s, reg_sec_sid)) {
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 0e7ad8fee3..697e0d84f3 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -35,7 +35,7 @@ smmuv3_trigger_irq(int irq) "irq=%d"
 smmuv3_write_gerror(uint32_t toggled, uint32_t gerror) "toggled=0x%x, new GERROR=0x%x"
 smmuv3_write_gerrorn(uint32_t acked, uint32_t gerrorn) "acked=0x%x, new GERRORN=0x%x"
 smmuv3_unhandled_cmd(uint32_t type) "Unhandled command type=%d"
-smmuv3_cmdq_consume(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod=%d cons=%d prod.wrap=%d cons.wrap=%d"
+smmuv3_cmdq_consume(int sec_sid, uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "sec_sid=%d prod=%d cons=%d prod.wrap=%d cons.wrap=%d"
 smmuv3_cmdq_opcode(const char *opcode) "<--- %s"
 smmuv3_cmdq_consume_out(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod:%d, cons:%d, prod_wrap:%d, cons_wrap:%d "
 smmuv3_cmdq_consume_error(const char *cmd_name, uint8_t cmd_error) "Error on %s command execution: %d"
-- 
2.34.1
Re: [RFC v3 17/21] hw/arm/smmuv3: Pass security state to command queue and IRQ logic
Posted by Eric Auger 2 months, 1 week ago

On 10/12/25 5:14 PM, Tao Tang wrote:
> The command queue and interrupt logic must operate within the correct
> security context. Handling a command queue in one security state can
> have side effects, such as interrupts or errors, that need to be
> processed in another. This requires the IRQ and GERROR logic to be
> fully aware of the multi-security-state environment.
>
> This patch refactors the command queue processing and interrupt handling
> to be security-state aware. Besides, unlike the command queue, the
> event queue logic was already updated to be security-state aware in a
> previous change. The SMMUSecSID is now passed through the relevant
> functions to ensure that:
>
> - Command queue operations are performed on the correct register bank.
>
> - Interrupts are triggered and checked against the correct security
> state's configuration.
>
> - Errors from command processing are reported in the correct GERROR
> register bank.
>
> - Architectural access controls, like preventing secure commands from a
> non-secure queue, are correctly enforced.
>
> - As Secure Stage 2 is not yet implemented, commands that target it
> are now correctly aborted during command queue processing.
>
> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
> ---
>  hw/arm/smmuv3.c     | 61 +++++++++++++++++++++++++++++++--------------
>  hw/arm/trace-events |  2 +-
>  2 files changed, 43 insertions(+), 20 deletions(-)
>
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index 432de88610..4ac7a2f3c7 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -46,11 +46,11 @@
>   *
>   * @irq: irq type
>   * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
> + * @sec_sid: SEC_SID of the bank
>   */
>  static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
> -                               uint32_t gerror_mask)
> +                               uint32_t gerror_mask, SMMUSecSID sec_sid)
>  {
> -    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>      SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
>  
>      bool pulse = false;
> @@ -87,9 +87,9 @@ static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
>      }
>  }
>  
> -static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
> +static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn,
> +                                 SMMUSecSID sec_sid)
>  {
> -    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>      SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
>      uint32_t pending = bank->gerror ^ bank->gerrorn;
>      uint32_t toggled = bank->gerrorn ^ new_gerrorn;
> @@ -109,7 +109,7 @@ static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
>      trace_smmuv3_write_gerrorn(toggled & pending, bank->gerrorn);
>  }
>  
> -static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
> +static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd, SMMUSecSID sec_sid)
why this change as sec_sid is not yet used?
besides the q is already specialized for a given sec_sid
>  {
>      dma_addr_t addr = Q_CONS_ENTRY(q);
>      MemTxResult ret;
> @@ -167,7 +167,7 @@ static MemTxResult smmuv3_write_eventq(SMMUv3State *s, SMMUSecSID sec_sid,
>      }
>  
>      if (!smmuv3_q_empty(q)) {
> -        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
> +        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0, sec_sid);
>      }
>      return MEMTX_OK;
>  }
> @@ -263,7 +263,8 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
>                                info->sid);
>      r = smmuv3_write_eventq(s, sec_sid, &evt);
>      if (r != MEMTX_OK) {
> -        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
> +        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR,
> +                           R_GERROR_EVENTQ_ABT_ERR_MASK, sec_sid);
>      }
>      info->recorded = true;
>  }
> @@ -1457,11 +1458,10 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s, SMMUSecSID sec_sid)
>      return smmu_irq_ctl_evtq_irqen_disabled(s, sec_sid);
>  }
>  
> -static int smmuv3_cmdq_consume(SMMUv3State *s)
> +static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid)
>  {
>      SMMUState *bs = ARM_SMMU(s);
>      SMMUCmdError cmd_error = SMMU_CERROR_NONE;
> -    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>      SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
>      SMMUQueue *q = &bank->cmdq;
>      SMMUCommandType type = 0;
> @@ -1480,14 +1480,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>          uint32_t pending = bank->gerror ^ bank->gerrorn;
>          Cmd cmd;
>  
> -        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
> +        trace_smmuv3_cmdq_consume(sec_sid, Q_PROD(q), Q_CONS(q),
>                                    Q_PROD_WRAP(q), Q_CONS_WRAP(q));
>  
>          if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
>              break;
>          }
>  
> -        if (queue_read(q, &cmd) != MEMTX_OK) {
> +        if (queue_read(q, &cmd, sec_sid) != MEMTX_OK) {
>              cmd_error = SMMU_CERROR_ABT;
>              break;
>          }
> @@ -1500,7 +1500,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>          switch (type) {
>          case SMMU_CMD_SYNC:
>              if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
> -                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
> +                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0, sec_sid);
>              }
>              break;
>          case SMMU_CMD_PREFETCH_CONFIG:
> @@ -1512,6 +1512,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>              SMMUDevice *sdev = smmu_find_sdev(bs, sid);
>  
>              if (CMD_SSEC(&cmd)) {
when reading the spec I have the impression SSEC is common to all commands
4.1.6 Common command fields
Can't you factorize that check?
> +                if (sec_sid != SMMU_SEC_SID_S) {
> +                    /* Secure Stream with Non-Secure command */
> +                    cmd_error = SMMU_CERROR_ILL;
> +                    break;
> +                }
>                  cmd_error = SMMU_CERROR_ILL;
>                  break;
>              }
> @@ -1532,6 +1537,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>              SMMUSIDRange sid_range;
>  
>              if (CMD_SSEC(&cmd)) {
> +                if (sec_sid != SMMU_SEC_SID_S) {
> +                    cmd_error = SMMU_CERROR_ILL;
> +                    break;
> +                }
>                  cmd_error = SMMU_CERROR_ILL;
>                  break;
>              }
> @@ -1551,6 +1560,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>              SMMUDevice *sdev = smmu_find_sdev(bs, sid);
>  
>              if (CMD_SSEC(&cmd)) {
> +                if (sec_sid != SMMU_SEC_SID_S) {
> +                    cmd_error = SMMU_CERROR_ILL;
> +                    break;
> +                }
>                  cmd_error = SMMU_CERROR_ILL;
>                  break;
>              }
> @@ -1618,7 +1631,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>                  cmd_error = SMMU_CERROR_ILL;
>                  break;
>              }
> -            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, SMMU_SEC_SID_NS);
> +            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, sec_sid);
>              break;
>          case SMMU_CMD_TLBI_S12_VMALL:
>          {
> @@ -1628,6 +1641,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>                  cmd_error = SMMU_CERROR_ILL;
>                  break;
>              }
> +            /* Secure Stage 2 isn't supported for now */
> +            if (sec_sid != SMMU_SEC_SID_NS) {
> +                cmd_error = SMMU_CERROR_ABT;
> +                break;
> +            }
>  
>              trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
>              smmu_inv_notifiers_all(&s->smmu_state);
> @@ -1639,11 +1657,16 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>                  cmd_error = SMMU_CERROR_ILL;
>                  break;
>              }
> +
> +            if (sec_sid != SMMU_SEC_SID_NS) {
> +                cmd_error = SMMU_CERROR_ABT;
> +                break;
> +            }
>              /*
>               * As currently only either s1 or s2 are supported
>               * we can reuse same function for s2.
>               */
> -            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, SMMU_SEC_SID_NS);
> +            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, sec_sid);
>              break;
>          case SMMU_CMD_TLBI_EL3_ALL:
>          case SMMU_CMD_TLBI_EL3_VA:
> @@ -1680,7 +1703,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>      if (cmd_error) {
>          trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
>          smmu_write_cmdq_err(s, cmd_error, sec_sid);
> -        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
> +        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK, sec_sid);
>      }
>  
>      trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
> @@ -1772,7 +1795,7 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
>          bank->cr[0] = data;
>          bank->cr0ack = data & ~SMMU_CR0_RESERVED;
>          /* in case the command queue has been enabled */
> -        smmuv3_cmdq_consume(s);
> +        smmuv3_cmdq_consume(s, reg_sec_sid);
>          return MEMTX_OK;
>      case A_CR1:
>          bank->cr[1] = data;
> @@ -1792,12 +1815,12 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
>          bank->irq_ctrl = data;
>          return MEMTX_OK;
>      case A_GERRORN:
> -        smmuv3_write_gerrorn(s, data);
> +        smmuv3_write_gerrorn(s, data, reg_sec_sid);
>          /*
>           * By acknowledging the CMDQ_ERR, SW may notify cmds can
>           * be processed again
>           */
> -        smmuv3_cmdq_consume(s);
> +        smmuv3_cmdq_consume(s, reg_sec_sid);
>          return MEMTX_OK;
>      case A_GERROR_IRQ_CFG0: /* 64b */
>          if (!smmu_gerror_irq_cfg_writable(s, reg_sec_sid)) {
> @@ -1899,7 +1922,7 @@ static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
>          return MEMTX_OK;
>      case A_CMDQ_PROD:
>          bank->cmdq.prod = data;
> -        smmuv3_cmdq_consume(s);
> +        smmuv3_cmdq_consume(s, reg_sec_sid);
>          return MEMTX_OK;
>      case A_CMDQ_CONS:
>          if (!smmu_cmdqen_disabled(s, reg_sec_sid)) {
> diff --git a/hw/arm/trace-events b/hw/arm/trace-events
> index 0e7ad8fee3..697e0d84f3 100644
> --- a/hw/arm/trace-events
> +++ b/hw/arm/trace-events
> @@ -35,7 +35,7 @@ smmuv3_trigger_irq(int irq) "irq=%d"
>  smmuv3_write_gerror(uint32_t toggled, uint32_t gerror) "toggled=0x%x, new GERROR=0x%x"
>  smmuv3_write_gerrorn(uint32_t acked, uint32_t gerrorn) "acked=0x%x, new GERRORN=0x%x"
>  smmuv3_unhandled_cmd(uint32_t type) "Unhandled command type=%d"
> -smmuv3_cmdq_consume(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod=%d cons=%d prod.wrap=%d cons.wrap=%d"
> +smmuv3_cmdq_consume(int sec_sid, uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "sec_sid=%d prod=%d cons=%d prod.wrap=%d cons.wrap=%d"
>  smmuv3_cmdq_opcode(const char *opcode) "<--- %s"
>  smmuv3_cmdq_consume_out(uint32_t prod, uint32_t cons, uint8_t prod_wrap, uint8_t cons_wrap) "prod:%d, cons:%d, prod_wrap:%d, cons_wrap:%d "
>  smmuv3_cmdq_consume_error(const char *cmd_name, uint8_t cmd_error) "Error on %s command execution: %d"
Thanks

Eric
Re: [RFC v3 17/21] hw/arm/smmuv3: Pass security state to command queue and IRQ logic
Posted by Tao Tang 2 months, 1 week ago
Hi Eric,

On 2025/12/4 22:46, Eric Auger wrote:
>
> On 10/12/25 5:14 PM, Tao Tang wrote:
>> The command queue and interrupt logic must operate within the correct
>> security context. Handling a command queue in one security state can
>> have side effects, such as interrupts or errors, that need to be
>> processed in another. This requires the IRQ and GERROR logic to be
>> fully aware of the multi-security-state environment.
>>
>> This patch refactors the command queue processing and interrupt handling
>> to be security-state aware. Besides, unlike the command queue, the
>> event queue logic was already updated to be security-state aware in a
>> previous change. The SMMUSecSID is now passed through the relevant
>> functions to ensure that:
>>
>> - Command queue operations are performed on the correct register bank.
>>
>> - Interrupts are triggered and checked against the correct security
>> state's configuration.
>>
>> - Errors from command processing are reported in the correct GERROR
>> register bank.
>>
>> - Architectural access controls, like preventing secure commands from a
>> non-secure queue, are correctly enforced.
>>
>> - As Secure Stage 2 is not yet implemented, commands that target it
>> are now correctly aborted during command queue processing.
>>
>> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
>> ---
>>   hw/arm/smmuv3.c     | 61 +++++++++++++++++++++++++++++++--------------
>>   hw/arm/trace-events |  2 +-
>>   2 files changed, 43 insertions(+), 20 deletions(-)
>>
>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
>> index 432de88610..4ac7a2f3c7 100644
>> --- a/hw/arm/smmuv3.c
>> +++ b/hw/arm/smmuv3.c
>> @@ -46,11 +46,11 @@
>>    *
>>    * @irq: irq type
>>    * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
>> + * @sec_sid: SEC_SID of the bank
>>    */
>>   static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
>> -                               uint32_t gerror_mask)
>> +                               uint32_t gerror_mask, SMMUSecSID sec_sid)
>>   {
>> -    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>>       SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
>>   
>>       bool pulse = false;
>> @@ -87,9 +87,9 @@ static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
>>       }
>>   }
>>   
>> -static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
>> +static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn,
>> +                                 SMMUSecSID sec_sid)
>>   {
>> -    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>>       SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
>>       uint32_t pending = bank->gerror ^ bank->gerrorn;
>>       uint32_t toggled = bank->gerrorn ^ new_gerrorn;
>> @@ -109,7 +109,7 @@ static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
>>       trace_smmuv3_write_gerrorn(toggled & pending, bank->gerrorn);
>>   }
>>   
>> -static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd)
>> +static inline MemTxResult queue_read(SMMUQueue *q, Cmd *cmd, SMMUSecSID sec_sid)
> why this change as sec_sid is not yet used?
> besides the q is already specialized for a given sec_sid
>>   {
>>       dma_addr_t addr = Q_CONS_ENTRY(q);
>>       MemTxResult ret;
>> @@ -167,7 +167,7 @@ static MemTxResult smmuv3_write_eventq(SMMUv3State *s, SMMUSecSID sec_sid,
>>       }
>>   
>>       if (!smmuv3_q_empty(q)) {
>> -        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
>> +        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0, sec_sid);
>>       }
>>       return MEMTX_OK;
>>   }
>> @@ -263,7 +263,8 @@ void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
>>                                 info->sid);
>>       r = smmuv3_write_eventq(s, sec_sid, &evt);
>>       if (r != MEMTX_OK) {
>> -        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
>> +        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR,
>> +                           R_GERROR_EVENTQ_ABT_ERR_MASK, sec_sid);
>>       }
>>       info->recorded = true;
>>   }
>> @@ -1457,11 +1458,10 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s, SMMUSecSID sec_sid)
>>       return smmu_irq_ctl_evtq_irqen_disabled(s, sec_sid);
>>   }
>>   
>> -static int smmuv3_cmdq_consume(SMMUv3State *s)
>> +static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid)
>>   {
>>       SMMUState *bs = ARM_SMMU(s);
>>       SMMUCmdError cmd_error = SMMU_CERROR_NONE;
>> -    SMMUSecSID sec_sid = SMMU_SEC_SID_NS;
>>       SMMUv3RegBank *bank = smmuv3_bank(s, sec_sid);
>>       SMMUQueue *q = &bank->cmdq;
>>       SMMUCommandType type = 0;
>> @@ -1480,14 +1480,14 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>           uint32_t pending = bank->gerror ^ bank->gerrorn;
>>           Cmd cmd;
>>   
>> -        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
>> +        trace_smmuv3_cmdq_consume(sec_sid, Q_PROD(q), Q_CONS(q),
>>                                     Q_PROD_WRAP(q), Q_CONS_WRAP(q));
>>   
>>           if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
>>               break;
>>           }
>>   
>> -        if (queue_read(q, &cmd) != MEMTX_OK) {
>> +        if (queue_read(q, &cmd, sec_sid) != MEMTX_OK) {
>>               cmd_error = SMMU_CERROR_ABT;
>>               break;
>>           }
>> @@ -1500,7 +1500,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>           switch (type) {
>>           case SMMU_CMD_SYNC:
>>               if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
>> -                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
>> +                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0, sec_sid);
>>               }
>>               break;
>>           case SMMU_CMD_PREFETCH_CONFIG:
>> @@ -1512,6 +1512,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>               SMMUDevice *sdev = smmu_find_sdev(bs, sid);
>>   
>>               if (CMD_SSEC(&cmd)) {
> when reading the spec I have the impression SSEC is common to all commands
> 4.1.6 Common command fields
> Can't you factorize that check?
>> +                if (sec_sid != SMMU_SEC_SID_S) {
>> +                    /* Secure Stream with Non-Secure command */
>> +                    cmd_error = SMMU_CERROR_ILL;
>> +                    break;
>> +                }
>>                   cmd_error = SMMU_CERROR_ILL;
>>                   break;
>>               }
>> @@ -1532,6 +1537,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>               SMMUSIDRange sid_range;
>>   
>>               if (CMD_SSEC(&cmd)) {
>> +                if (sec_sid != SMMU_SEC_SID_S) {
>> +                    cmd_error = SMMU_CERROR_ILL;
>> +                    break;
>> +                }
>>                   cmd_error = SMMU_CERROR_ILL;
>>                   break;
>>               }
>> @@ -1551,6 +1560,10 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>               SMMUDevice *sdev = smmu_find_sdev(bs, sid);
>>   
>>               if (CMD_SSEC(&cmd)) {
>> +                if (sec_sid != SMMU_SEC_SID_S) {
>> +                    cmd_error = SMMU_CERROR_ILL;
>> +                    break;
>> +                }
>>                   cmd_error = SMMU_CERROR_ILL;
>>                   break;
>>               }
>> @@ -1618,7 +1631,7 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>                   cmd_error = SMMU_CERROR_ILL;
>>                   break;
>>               }
>> -            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, SMMU_SEC_SID_NS);
>> +            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, sec_sid);
>>               break;
>>           case SMMU_CMD_TLBI_S12_VMALL:
>>           {
>> @@ -1628,6 +1641,11 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>                   cmd_error = SMMU_CERROR_ILL;
>>                   break;
>>               }
>> +            /* Secure Stage 2 isn't supported for now */
>> +            if (sec_sid != SMMU_SEC_SID_NS) {
>> +                cmd_error = SMMU_CERROR_ABT;
>> +                break;
>> +            }
>>   
>>               trace_smmuv3_cmdq_tlbi_s12_vmid(vmid);
>>               smmu_inv_notifiers_all(&s->smmu_state);
>> @@ -1639,11 +1657,16 @@ static int smmuv3_cmdq_consume(SMMUv3State *s)
>>                   cmd_error = SMMU_CERROR_ILL;
>>                   break;
>>               }
>> +
>> +            if (sec_sid != SMMU_SEC_SID_NS) {
>> +                cmd_error = SMMU_CERROR_ABT;
>> +                break;
>> +            }
>>               /*
>>                * As currently only either s1 or s2 are supported
>>                * we can reuse same function for s2.
>>                */
>> -            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, SMMU_SEC_SID_NS);
>> +            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_2, sec_sid);
>>               break;
>>           case SMMU_CMD_TLBI_EL3_ALL:
>>           case SMMU_CMD_TLBI_EL3_VA:


Thanks for the pointers on 4.1.6. After reading the spec, I realize I 
did not clearly separate two related concepts in my initial implementation:

- the security context of the command queue / register bank, and

- the security state of the target stream that the command is meant to
operate on, as described by the SSEC field in the command payload.


I plan to split the command queue's security state from the stream's
security state. The command queue bank (NS/S) still drives which
register bank sees the GERROR/CMDQ state, while a small helper now
interprets SSEC so every opcode automatically targets the right stream
security state, which is returned via the SMMUSecSID *stream_sid output
parameter (NS-only for the NS queue; Secure vs Non-secure selected per
SSEC on the Secure queue).



static bool smmuv3_cmd_resolve_stream_sid(SMMUSecSID cmdq_sid,
                                           const Cmd *cmd,
                                          SMMUSecSID *stream_sid,
                                          /* Output SEC_SID that points to
                                           * the target stream and will be
                                           * used by the subsequent code */
                                           SMMUCmdError *err)
{
     uint32_t ssec = CMD_SSEC(cmd);

     switch (cmdq_sid) {
     case SMMU_SEC_SID_NS:
         if (ssec) {
             *err = SMMU_CERROR_ILL;
             return false;
         }
         *stream_sid = SMMU_SEC_SID_NS;
         return true;
     case SMMU_SEC_SID_S:
         *stream_sid = ssec ? SMMU_SEC_SID_S : SMMU_SEC_SID_NS;
         return true;
     default:
         *err = SMMU_CERROR_ILL;
         return false;
     }
}


static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecSID sec_sid) {

.......

         if (!smmuv3_cmd_resolve_stream_sid(sec_sid, &cmd,
                                            &stream_sid, &cmd_error)) {
             break;
         }

         qemu_mutex_lock(&s->mutex);
         switch (type) {

.......

         case SMMU_CMD_TLBI_NH_VAA:
         case SMMU_CMD_TLBI_NH_VA:
             if (!STAGE1_SUPPORTED(s)) {
                 cmd_error = SMMU_CERROR_ILL;
                 break;
             }
            smmuv3_range_inval(bs, &cmd, SMMU_STAGE_1, stream_sid);
            /* Use the output SEC_SID returned from
             * smmuv3_cmd_resolve_stream_sid */
             break;

}


Could you please take a quick look at the helper before I fold it into v4?


Also, I'll remove the `SMMUSecSID sec_sid` parameter from queue_read().


Thanks a lot for your time!

Best regards,
Tao