[PATCH v2 11/14] hw/arm/smmuv3: Harden security checks in MMIO handlers
Posted by Tao Tang 4 months, 2 weeks ago
This patch hardens the security validation within the main MMIO
dispatcher functions (smmu_read_mmio and smmu_write_mmio).

First, accesses to the secure register space are now correctly gated by
whether the SECURE_IMPL feature is enabled in the model. This prevents
guest software from accessing the secure programming interface when it is
disabled, though some registers are exempt from this check as per the
architecture.

Second, the check for the input stream's security is made more robust.
It now validates not only the legacy MemTxAttrs.secure bit, but also
the .space field. This brings the SMMU's handling of security spaces
into full alignment with the PE.

Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
---
 hw/arm/smmuv3.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index 53c7eff0e3..eec36d5fd2 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -1484,6 +1484,12 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s,
     return smmu_irq_ctl_evtq_irqen_disabled(s, sec_idx);
 }
 
+/* Check if the SMMU hardware itself implements secure state features */
+static inline bool smmu_hw_secure_implemented(SMMUv3State *s)
+{
+    return FIELD_EX32(s->bank[SMMU_SEC_IDX_S].idr[1], S_IDR1, SECURE_IMPL);
+}
+
 static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex sec_idx)
 {
     SMMUState *bs = ARM_SMMU(s);
@@ -1723,6 +1729,43 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex sec_idx)
     return 0;
 }
 
+static bool is_secure_impl_exempt_reg(hwaddr offset)
+{
+    switch (offset) {
+    case A_S_EVENTQ_IRQ_CFG0:
+    case A_S_EVENTQ_IRQ_CFG1:
+    case A_S_EVENTQ_IRQ_CFG2:
+        return true;
+    default:
+        return false;
+    }
+}
+
+/* Helper function for Secure register access validation */
+static bool smmu_check_secure_access(SMMUv3State *s, MemTxAttrs attrs,
+                                     hwaddr offset, bool is_read)
+{   /* Check if the access is secure */
+    if (!(attrs.space == ARMSS_Secure || attrs.space == ARMSS_Root ||
+          attrs.secure == 1)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+            "%s: Non-secure %s attempt at offset 0x%" PRIx64 " (%s)\n",
+            __func__, is_read ? "read" : "write", offset,
+            is_read ? "RAZ" : "WI");
+        return false;
+    }
+
+    /* Check if the secure state is implemented. Some registers are exempted */
+    /* from this check. */
+    if (!is_secure_impl_exempt_reg(offset) && !smmu_hw_secure_implemented(s)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+            "%s: Secure %s attempt at offset 0x%" PRIx64 ". But Secure state "
+            "is not implemented (RES0)\n",
+            __func__, is_read ? "read" : "write", offset);
+        return false;
+    }
+    return true;
+}
+
 static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs,
                                SMMUSecurityIndex reg_sec_idx)
@@ -2038,6 +2081,13 @@ static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
     offset &= ~0x10000;
     SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
+    if (offset >= SMMU_SECURE_BASE_OFFSET) {
+        if (!smmu_check_secure_access(s, attrs, offset, false)) {
+            trace_smmuv3_write_mmio(offset, data, size, MEMTX_OK);
+            return MEMTX_OK;
+        }
+        reg_sec_idx = SMMU_SEC_IDX_S;
+    }
 
     switch (size) {
     case 8:
@@ -2252,6 +2302,14 @@ static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
     /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
     offset &= ~0x10000;
     SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
+    if (offset >= SMMU_SECURE_BASE_OFFSET) {
+        if (!smmu_check_secure_access(s, attrs, offset, true)) {
+            *data = 0;
+            trace_smmuv3_read_mmio(offset, *data, size, MEMTX_OK);
+            return MEMTX_OK;
+        }
+        reg_sec_idx = SMMU_SEC_IDX_S;
+    }
 
     switch (size) {
     case 8:
-- 
2.34.1
Re: [PATCH v2 11/14] hw/arm/smmuv3: Harden security checks in MMIO handlers
Posted by Eric Auger 4 months, 2 weeks ago
Hi Tao,

On 9/25/25 6:26 PM, Tao Tang wrote:
> This patch hardens the security validation within the main MMIO
> dispatcher functions (smmu_read_mmio and smmu_write_mmio).
>
> First, accesses to the secure register space are now correctly gated by
> whether the SECURE_IMPL feature is enabled in the model. This prevents
> guest software from accessing the secure programming interface when it is
> disabled, though some registers are exempt from this check as per the
> architecture.
>
> Second, the check for the input stream's security is made more robust.
> It now validates not only the legacy MemTxAttrs.secure bit, but also
> the .space field. This brings the SMMU's handling of security spaces
> into full alignment with the PE.
>
> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
> ---
>  hw/arm/smmuv3.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 58 insertions(+)
>
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index 53c7eff0e3..eec36d5fd2 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -1484,6 +1484,12 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s,
>      return smmu_irq_ctl_evtq_irqen_disabled(s, sec_idx);
>  }
>  
> +/* Check if the SMMU hardware itself implements secure state features */
> +static inline bool smmu_hw_secure_implemented(SMMUv3State *s)
> +{
> +    return FIELD_EX32(s->bank[SMMU_SEC_IDX_S].idr[1], S_IDR1, SECURE_IMPL);
> +}
> +
>  static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex sec_idx)
>  {
>      SMMUState *bs = ARM_SMMU(s);
> @@ -1723,6 +1729,43 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex sec_idx)
>      return 0;
>  }
>  
> +static bool is_secure_impl_exempt_reg(hwaddr offset)
Worth a comment: some secure registers can be accessed even if secure HW
is not implemented. Returns true if this is the case, or something along those lines.
> +{
> +    switch (offset) {
> +    case A_S_EVENTQ_IRQ_CFG0:
> +    case A_S_EVENTQ_IRQ_CFG1:
> +    case A_S_EVENTQ_IRQ_CFG2:
> +        return true;
> +    default:
> +        return false;
> +    }
> +}
> +
> +/* Helper function for Secure register access validation */
> +static bool smmu_check_secure_access(SMMUv3State *s, MemTxAttrs attrs,
> +                                     hwaddr offset, bool is_read)
> +{   /* Check if the access is secure */
> +    if (!(attrs.space == ARMSS_Secure || attrs.space == ARMSS_Root ||
First occurrence of ARMSS_Root in hw dir? Is it needed?
> +          attrs.secure == 1)) {
> +        qemu_log_mask(LOG_GUEST_ERROR,
> +            "%s: Non-secure %s attempt at offset 0x%" PRIx64 " (%s)\n",
> +            __func__, is_read ? "read" : "write", offset,
> +            is_read ? "RAZ" : "WI");
> +        return false;
> +    }
> +
> +    /* Check if the secure state is implemented. Some registers are exempted */
> +    /* from this check. */
> +    if (!is_secure_impl_exempt_reg(offset) && !smmu_hw_secure_implemented(s)) {
> +        qemu_log_mask(LOG_GUEST_ERROR,
> +            "%s: Secure %s attempt at offset 0x%" PRIx64 ". But Secure state "
> +            "is not implemented (RES0)\n",
> +            __func__, is_read ? "read" : "write", offset);
> +        return false;
> +    }
> +    return true;
> +}
> +
>  static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
>                                 uint64_t data, MemTxAttrs attrs,
>                                 SMMUSecurityIndex reg_sec_idx)
> @@ -2038,6 +2081,13 @@ static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
>      /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
>      offset &= ~0x10000;
>      SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
> +    if (offset >= SMMU_SECURE_BASE_OFFSET) {
> +        if (!smmu_check_secure_access(s, attrs, offset, false)) {
> +            trace_smmuv3_write_mmio(offset, data, size, MEMTX_OK);
> +            return MEMTX_OK;
> +        }
> +        reg_sec_idx = SMMU_SEC_IDX_S;
> +    }
>  
>      switch (size) {
>      case 8:
> @@ -2252,6 +2302,14 @@ static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
>      /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
>      offset &= ~0x10000;
>      SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
> +    if (offset >= SMMU_SECURE_BASE_OFFSET) {
> +        if (!smmu_check_secure_access(s, attrs, offset, true)) {
> +            *data = 0;
> +            trace_smmuv3_read_mmio(offset, *data, size, MEMTX_OK);
> +            return MEMTX_OK;
> +        }
> +        reg_sec_idx = SMMU_SEC_IDX_S;
> +    }
>  
>      switch (size) {
>      case 8:
Thanks

Eric
Re: [PATCH v2 11/14] hw/arm/smmuv3: Harden security checks in MMIO handlers
Posted by Tao Tang 4 months, 2 weeks ago
Hi Eric,

On 2025/9/29 23:30, Eric Auger wrote:
> Hi Tao,
>
> On 9/25/25 6:26 PM, Tao Tang wrote:
>> This patch hardens the security validation within the main MMIO
>> dispatcher functions (smmu_read_mmio and smmu_write_mmio).
>>
>> First, accesses to the secure register space are now correctly gated by
>> whether the SECURE_IMPL feature is enabled in the model. This prevents
>> guest software from accessing the secure programming interface when it is
>> disabled, though some registers are exempt from this check as per the
>> architecture.
>>
>> Second, the check for the input stream's security is made more robust.
>> It now validates not only the legacy MemTxAttrs.secure bit, but also
>> the .space field. This brings the SMMU's handling of security spaces
>> into full alignment with the PE.
>>
>> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
>> ---
>>   hw/arm/smmuv3.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 58 insertions(+)
>>
>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
>> index 53c7eff0e3..eec36d5fd2 100644
>> --- a/hw/arm/smmuv3.c
>> +++ b/hw/arm/smmuv3.c
>> @@ -1484,6 +1484,12 @@ static bool smmu_eventq_irq_cfg_writable(SMMUv3State *s,
>>       return smmu_irq_ctl_evtq_irqen_disabled(s, sec_idx);
>>   }
>>   
>> +/* Check if the SMMU hardware itself implements secure state features */
>> +static inline bool smmu_hw_secure_implemented(SMMUv3State *s)
>> +{
>> +    return FIELD_EX32(s->bank[SMMU_SEC_IDX_S].idr[1], S_IDR1, SECURE_IMPL);
>> +}
>> +
>>   static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex sec_idx)
>>   {
>>       SMMUState *bs = ARM_SMMU(s);
>> @@ -1723,6 +1729,43 @@ static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex sec_idx)
>>       return 0;
>>   }
>>   
>> +static bool is_secure_impl_exempt_reg(hwaddr offset)
> Worth a comment: some secure registers can be accessed even if secure HW
> is not implemented. Returns true if this is the case, or something along those lines.


You're right, that function definitely needs a comment to explain the 
architectural exception it handles. I will add one in the next version 
to improve clarity.
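
Something along these lines, perhaps (exact wording still to be
polished for v3):

/*
 * A few Secure registers remain accessible even when the Secure state
 * is not implemented (S_IDR1.SECURE_IMPL == 0), as per the
 * architecture: the S_EVENTQ_IRQ_CFGx group. Return true for registers
 * that are exempt from the SECURE_IMPL gating.
 */
static bool is_secure_impl_exempt_reg(hwaddr offset)
{
    switch (offset) {
    case A_S_EVENTQ_IRQ_CFG0:
    case A_S_EVENTQ_IRQ_CFG1:
    case A_S_EVENTQ_IRQ_CFG2:
        return true;
    default:
        return false;
    }
}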

>> +{
>> +    switch (offset) {
>> +    case A_S_EVENTQ_IRQ_CFG0:
>> +    case A_S_EVENTQ_IRQ_CFG1:
>> +    case A_S_EVENTQ_IRQ_CFG2:
>> +        return true;
>> +    default:
>> +        return false;
>> +    }
>> +}
>> +
>> +/* Helper function for Secure register access validation */
>> +static bool smmu_check_secure_access(SMMUv3State *s, MemTxAttrs attrs,
>> +                                     hwaddr offset, bool is_read)
>> +{   /* Check if the access is secure */
>> +    if (!(attrs.space == ARMSS_Secure || attrs.space == ARMSS_Root ||
> First occurrence of ARMSS_Root in hw dir? Is it needed?


This is a good question, and I'd like to clarify your expectation. My 
thinking was that if we are using ARMSecuritySpace to propagate the 
security context at the device level, then ARMSS_Root will eventually be 
part of this check.

Is your suggestion that I should remove the ARMSS_Root check for now, as 
it's not strictly necessary for the current Secure-state implementation, 
and only re-introduce it when full Realm/Root support is added to the 
SMMU model? I'm happy to do that to keep this patch focused.

Thanks,
Tao

>> +          attrs.secure == 1)) {
>> +        qemu_log_mask(LOG_GUEST_ERROR,
>> +            "%s: Non-secure %s attempt at offset 0x%" PRIx64 " (%s)\n",
>> +            __func__, is_read ? "read" : "write", offset,
>> +            is_read ? "RAZ" : "WI");
>> +        return false;
>> +    }
>> +
>> +    /* Check if the secure state is implemented. Some registers are exempted */
>> +    /* from this check. */
>> +    if (!is_secure_impl_exempt_reg(offset) && !smmu_hw_secure_implemented(s)) {
>> +        qemu_log_mask(LOG_GUEST_ERROR,
>> +            "%s: Secure %s attempt at offset 0x%" PRIx64 ". But Secure state "
>> +            "is not implemented (RES0)\n",
>> +            __func__, is_read ? "read" : "write", offset);
>> +        return false;
>> +    }
>> +    return true;
>> +}
>> +
>>   static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
>>                                  uint64_t data, MemTxAttrs attrs,
>>                                  SMMUSecurityIndex reg_sec_idx)
>> @@ -2038,6 +2081,13 @@ static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
>>       /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
>>       offset &= ~0x10000;
>>       SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
>> +    if (offset >= SMMU_SECURE_BASE_OFFSET) {
>> +        if (!smmu_check_secure_access(s, attrs, offset, false)) {
>> +            trace_smmuv3_write_mmio(offset, data, size, MEMTX_OK);
>> +            return MEMTX_OK;
>> +        }
>> +        reg_sec_idx = SMMU_SEC_IDX_S;
>> +    }
>>   
>>       switch (size) {
>>       case 8:
>> @@ -2252,6 +2302,14 @@ static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
>>       /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
>>       offset &= ~0x10000;
>>       SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
>> +    if (offset >= SMMU_SECURE_BASE_OFFSET) {
>> +        if (!smmu_check_secure_access(s, attrs, offset, true)) {
>> +            *data = 0;
>> +            trace_smmuv3_read_mmio(offset, *data, size, MEMTX_OK);
>> +            return MEMTX_OK;
>> +        }
>> +        reg_sec_idx = SMMU_SEC_IDX_S;
>> +    }
>>   
>>       switch (size) {
>>       case 8:
> Thanks
>
> Eric
Re: [PATCH v2 11/14] hw/arm/smmuv3: Harden security checks in MMIO handlers
Posted by Eric Auger 4 months, 1 week ago
Hi Tao,

On 9/29/25 5:56 PM, Tao Tang wrote:
> Hi Eric,
>
> On 2025/9/29 23:30, Eric Auger wrote:
>> Hi Tao,
>>
>> On 9/25/25 6:26 PM, Tao Tang wrote:
>>> This patch hardens the security validation within the main MMIO
>>> dispatcher functions (smmu_read_mmio and smmu_write_mmio).
>>>
>>> First, accesses to the secure register space are now correctly gated by
>>> whether the SECURE_IMPL feature is enabled in the model. This prevents
>>> guest software from accessing the secure programming interface when
>>> it is
>>> disabled, though some registers are exempt from this check as per the
>>> architecture.
>>>
>>> Second, the check for the input stream's security is made more robust.
>>> It now validates not only the legacy MemTxAttrs.secure bit, but also
>>> the .space field. This brings the SMMU's handling of security spaces
>>> into full alignment with the PE.
>>>
>>> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
>>> ---
>>>   hw/arm/smmuv3.c | 58
>>> +++++++++++++++++++++++++++++++++++++++++++++++++
>>>   1 file changed, 58 insertions(+)
>>>
>>> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
>>> index 53c7eff0e3..eec36d5fd2 100644
>>> --- a/hw/arm/smmuv3.c
>>> +++ b/hw/arm/smmuv3.c
>>> @@ -1484,6 +1484,12 @@ static bool
>>> smmu_eventq_irq_cfg_writable(SMMUv3State *s,
>>>       return smmu_irq_ctl_evtq_irqen_disabled(s, sec_idx);
>>>   }
>>>   +/* Check if the SMMU hardware itself implements secure state
>>> features */
>>> +static inline bool smmu_hw_secure_implemented(SMMUv3State *s)
>>> +{
>>> +    return FIELD_EX32(s->bank[SMMU_SEC_IDX_S].idr[1], S_IDR1,
>>> SECURE_IMPL);
>>> +}
>>> +
>>>   static int smmuv3_cmdq_consume(SMMUv3State *s, SMMUSecurityIndex
>>> sec_idx)
>>>   {
>>>       SMMUState *bs = ARM_SMMU(s);
>>> @@ -1723,6 +1729,43 @@ static int smmuv3_cmdq_consume(SMMUv3State
>>> *s, SMMUSecurityIndex sec_idx)
>>>       return 0;
>>>   }
>>>   +static bool is_secure_impl_exempt_reg(hwaddr offset)
>> Worth a comment: some secure registers can be accessed even if secure HW
>> is not implemented. Returns true if this is the case, or something along those lines.
>
>
> You're right, that function definitely needs a comment to explain the
> architectural exception it handles. I will add one in the next version
> to improve clarity.
>
>>> +{
>>> +    switch (offset) {
>>> +    case A_S_EVENTQ_IRQ_CFG0:
>>> +    case A_S_EVENTQ_IRQ_CFG1:
>>> +    case A_S_EVENTQ_IRQ_CFG2:
>>> +        return true;
>>> +    default:
>>> +        return false;
>>> +    }
>>> +}
>>> +
>>> +/* Helper function for Secure register access validation */
>>> +static bool smmu_check_secure_access(SMMUv3State *s, MemTxAttrs attrs,
>>> +                                     hwaddr offset, bool is_read)
>>> +{   /* Check if the access is secure */
>>> +    if (!(attrs.space == ARMSS_Secure || attrs.space == ARMSS_Root ||
>> First occurrence of ARMSS_Root in hw dir? Is it needed?
>
>
> This is a good question, and I'd like to clarify your expectation. My
> thinking was that if we are using ARMSecuritySpace to propagate the
> security context at the device level, then ARMSS_Root will eventually
> be part of this check.
>
> Is your suggestion that I should remove the ARMSS_Root check for now,
> as it's not strictly necessary for the current Secure-state
> implementation, and only re-introduce it when full Realm/Root support
> is added to the SMMU model? I'm happy to do that to keep this patch
> focused.

Well, I think I would remove it if it is not supported anywhere. As an
alternative, if we can get this value and it is definitively not
supported by the code, we can assert.
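
Just as a sketch of the assert option (assuming attrs.space is reliably
populated on this path):

    /* Root is not modelled by the SMMU yet; don't silently accept it */
    g_assert(attrs.space != ARMSS_Root);

    /* then the secure check only needs Secure or the legacy secure bit */
    if (!(attrs.space == ARMSS_Secure || attrs.secure == 1)) {
        return false; /* access treated as non-secure: RAZ/WI */
    }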

Thanks

Eric
>
> Thanks,
> Tao
>
>>> +          attrs.secure == 1)) {
>>> +        qemu_log_mask(LOG_GUEST_ERROR,
>>> +            "%s: Non-secure %s attempt at offset 0x%" PRIx64 "
>>> (%s)\n",
>>> +            __func__, is_read ? "read" : "write", offset,
>>> +            is_read ? "RAZ" : "WI");
>>> +        return false;
>>> +    }
>>> +
>>> +    /* Check if the secure state is implemented. Some registers are
>>> exempted */
>>> +    /* from this check. */
>>> +    if (!is_secure_impl_exempt_reg(offset) &&
>>> !smmu_hw_secure_implemented(s)) {
>>> +        qemu_log_mask(LOG_GUEST_ERROR,
>>> +            "%s: Secure %s attempt at offset 0x%" PRIx64 ". But
>>> Secure state "
>>> +            "is not implemented (RES0)\n",
>>> +            __func__, is_read ? "read" : "write", offset);
>>> +        return false;
>>> +    }
>>> +    return true;
>>> +}
>>> +
>>>   static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
>>>                                  uint64_t data, MemTxAttrs attrs,
>>>                                  SMMUSecurityIndex reg_sec_idx)
>>> @@ -2038,6 +2081,13 @@ static MemTxResult smmu_write_mmio(void
>>> *opaque, hwaddr offset, uint64_t data,
>>>       /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact
>>> aliases */
>>>       offset &= ~0x10000;
>>>       SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
>>> +    if (offset >= SMMU_SECURE_BASE_OFFSET) {
>>> +        if (!smmu_check_secure_access(s, attrs, offset, false)) {
>>> +            trace_smmuv3_write_mmio(offset, data, size, MEMTX_OK);
>>> +            return MEMTX_OK;
>>> +        }
>>> +        reg_sec_idx = SMMU_SEC_IDX_S;
>>> +    }
>>>         switch (size) {
>>>       case 8:
>>> @@ -2252,6 +2302,14 @@ static MemTxResult smmu_read_mmio(void
>>> *opaque, hwaddr offset, uint64_t *data,
>>>       /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact
>>> aliases */
>>>       offset &= ~0x10000;
>>>       SMMUSecurityIndex reg_sec_idx = SMMU_SEC_IDX_NS;
>>> +    if (offset >= SMMU_SECURE_BASE_OFFSET) {
>>> +        if (!smmu_check_secure_access(s, attrs, offset, true)) {
>>> +            *data = 0;
>>> +            trace_smmuv3_read_mmio(offset, *data, size, MEMTX_OK);
>>> +            return MEMTX_OK;
>>> +        }
>>> +        reg_sec_idx = SMMU_SEC_IDX_S;
>>> +    }
>>>         switch (size) {
>>>       case 8:
>> Thanks
>>
>> Eric
>