Enhance the page table walker to correctly handle secure and non-secure
memory accesses. This change introduces logic to select the appropriate
address space and enforce architectural security policies during walks.
The page table walker now correctly processes Secure Stage 1
translations. Key changes include:
- The get_pte function now uses the security context to fetch table
entries from either the Secure or Non-secure address space.
- The stage 1 walker tracks the security state, respecting the NSCFG
and NSTable attributes. It correctly handles the hierarchical security
model: if a table descriptor in a secure walk has NSTable=1, all
subsequent lookups for that walk are forced into the Non-secure space.
This is a one-way transition, as specified by the architecture.
- A check is added to fault nested translations that produce a Secure
IPA when Secure stage 2 is not supported (SMMU_S_IDR1.SEL2 == 0).
- The final TLB entry is tagged with the correct output address space,
ensuring proper memory isolation.
Stage 2 translations are currently limited to Non-secure lookups. Full
support for Secure Stage 2 translation will be added in a future series.
Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
---
hw/arm/smmu-common.c | 64 +++++++++++++++++++++++++++++++++++++++-----
hw/arm/trace-events | 2 +-
2 files changed, 59 insertions(+), 7 deletions(-)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 5fabe30c75..a092bb5a8d 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -399,20 +399,26 @@ void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
* @base_addr[@index]
*/
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
- SMMUPTWEventInfo *info)
+ SMMUPTWEventInfo *info, SMMUSecSID sec_sid)
{
int ret;
dma_addr_t addr = baseaddr + index * sizeof(*pte);
-
+ MemTxAttrs attrs = smmu_get_txattrs(sec_sid);
+ AddressSpace *as = smmu_get_address_space(sec_sid);
+ if (!as) {
+ info->type = SMMU_PTW_ERR_WALK_EABT;
+ info->addr = addr;
+ return -EINVAL;
+ }
/* TODO: guarantee 64-bit single-copy atomicity */
- ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);
+ ret = ldq_le_dma(as, addr, pte, attrs);
if (ret != MEMTX_OK) {
info->type = SMMU_PTW_ERR_WALK_EABT;
info->addr = addr;
return -EINVAL;
}
- trace_smmu_get_pte(baseaddr, index, addr, *pte);
+ trace_smmu_get_pte(sec_sid, baseaddr, index, addr, *pte);
return 0;
}
@@ -543,6 +549,8 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
baseaddr = extract64(tt->ttb, 0, cfg->oas);
baseaddr &= ~indexmask;
+ int nscfg = tt->nscfg;
+ bool forced_ns = false; /* Track if NSTable=1 forced NS mode */
while (level < VMSA_LEVELS) {
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
@@ -552,7 +560,10 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
uint8_t ap;
- if (get_pte(baseaddr, offset, &pte, info)) {
+ /* Use NS if forced by previous NSTable=1 or current nscfg */
+ int current_ns = forced_ns || nscfg;
+ SMMUSecSID sec_sid = current_ns ? SMMU_SEC_SID_NS : SMMU_SEC_SID_S;
+ if (get_pte(baseaddr, offset, &pte, info, sec_sid)) {
goto error;
}
trace_smmu_ptw_level(stage, level, iova, subpage_size,
@@ -577,6 +588,26 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
goto error;
}
}
+
+ /*
+ * Hierarchical control of Secure/Non-secure accesses:
+ * If NSTable=1 from Secure space, force all subsequent lookups to
+ * Non-secure space and ignore future NSTable according to
+ * (IHI 0070G.b) 13.4.1 Stage 1 page permissions and
+ * (DDI 0487H.a) D8.4.2 Control of Secure or Non-secure memory access
+ */
+ if (!forced_ns) {
+ int new_nstable = PTE_NSTABLE(pte);
+ if (!current_ns && new_nstable) {
+ /* First transition from Secure to Non-secure */
+ forced_ns = true;
+ nscfg = 1;
+ } else if (!forced_ns) {
+ /* Still in original mode, update nscfg normally */
+ nscfg = new_nstable;
+ }
+ /* If forced_ns is already true, ignore NSTable bit */
+ }
level++;
continue;
} else if (is_page_pte(pte, level)) {
@@ -619,6 +650,13 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
goto error;
}
+ tlbe->sec_sid = PTE_NS(pte) ? SMMU_SEC_SID_NS : SMMU_SEC_SID_S;
+ tlbe->entry.target_as = smmu_get_address_space(tlbe->sec_sid);
+ if (!tlbe->entry.target_as) {
+ info->type = SMMU_PTW_ERR_WALK_EABT;
+ info->addr = gpa;
+ goto error;
+ }
tlbe->entry.translated_addr = gpa;
tlbe->entry.iova = iova & ~mask;
tlbe->entry.addr_mask = mask;
@@ -688,7 +726,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
uint8_t s2ap;
- if (get_pte(baseaddr, offset, &pte, info)) {
+ /* Use NS as Secure Stage 2 is not implemented (SMMU_S_IDR1.SEL2 == 0) */
+ if (get_pte(baseaddr, offset, &pte, info, SMMU_SEC_SID_NS)) {
goto error;
}
trace_smmu_ptw_level(stage, level, ipa, subpage_size,
@@ -741,6 +780,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
goto error_ipa;
}
+ tlbe->sec_sid = SMMU_SEC_SID_NS;
+ tlbe->entry.target_as = &address_space_memory;
tlbe->entry.translated_addr = gpa;
tlbe->entry.iova = ipa & ~mask;
tlbe->entry.addr_mask = mask;
@@ -825,6 +866,17 @@ int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
return ret;
}
+ if (!cfg->sel2 && tlbe->sec_sid > SMMU_SEC_SID_NS) {
+ /*
+ * Nested translation with Secure IPA output is not supported if
+ * Secure Stage 2 is not implemented.
+ */
+ info->type = SMMU_PTW_ERR_TRANSLATION;
+ info->stage = SMMU_STAGE_1;
+ tlbe->entry.perm = IOMMU_NONE;
+ return -EINVAL;
+ }
+
ipa = CACHED_ENTRY_TO_ADDR(tlbe, iova);
ret = smmu_ptw_64_s2(cfg, ipa, perm, &tlbe_s2, info);
if (ret) {
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 96ebd1b11b..a37e894766 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -16,7 +16,7 @@ smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_
smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64
smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64
smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
-smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
+smmu_get_pte(int sec_sid, uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "sec_sid=%d baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
smmu_iotlb_inv_all(void) "IOTLB invalidate all"
smmu_iotlb_inv_asid_vmid(int asid, int vmid) "IOTLB invalidate asid=%d vmid=%d"
smmu_iotlb_inv_vmid(int vmid) "IOTLB invalidate vmid=%d"
--
2.34.1
On 10/12/25 5:12 PM, Tao Tang wrote:
> Enhance the page table walker to correctly handle secure and non-secure
> memory accesses. This change introduces logic to select the appropriate
> address space and enforce architectural security policies during walks.
>
> The page table walker now correctly processes Secure Stage 1
> translations. Key changes include:
>
> - The get_pte function now uses the security context to fetch table
> entries from either the Secure or Non-secure address space.
>
> - The stage 1 walker tracks the security state, respecting the NSCFG
> and NSTable attributes. It correctly handles the hierarchical security
> model: if a table descriptor in a secure walk has NSTable=1, all
> subsequent lookups for that walk are forced into the Non-secure space.
> This is a one-way transition, as specified by the architecture.
>
> - A check is added to fault nested translations that produce a Secure
> IPA when Secure stage 2 is not supported (SMMU_S_IDR1.SEL2 == 0).
>
> - The final TLB entry is tagged with the correct output address space,
> ensuring proper memory isolation.
>
> Stage 2 translations are currently limited to Non-secure lookups. Full
> support for Secure Stage 2 translation will be added in a future series.
>
> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
> ---
> hw/arm/smmu-common.c | 64 +++++++++++++++++++++++++++++++++++++++-----
> hw/arm/trace-events | 2 +-
> 2 files changed, 59 insertions(+), 7 deletions(-)
>
> diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
> index 5fabe30c75..a092bb5a8d 100644
> --- a/hw/arm/smmu-common.c
> +++ b/hw/arm/smmu-common.c
> @@ -399,20 +399,26 @@ void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
> * @base_addr[@index]
> */
> static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
> - SMMUPTWEventInfo *info)
> + SMMUPTWEventInfo *info, SMMUSecSID sec_sid)
> {
> int ret;
> dma_addr_t addr = baseaddr + index * sizeof(*pte);
> -
> + MemTxAttrs attrs = smmu_get_txattrs(sec_sid);
> + AddressSpace *as = smmu_get_address_space(sec_sid);
> + if (!as) {
> + info->type = SMMU_PTW_ERR_WALK_EABT;
Is it WALK_EABT or PERMISSION in that case? I failed to find where this is
specified in the spec. Could you add a reference?
> + info->addr = addr;
> + return -EINVAL;
> + }
> /* TODO: guarantee 64-bit single-copy atomicity */
> - ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);
> + ret = ldq_le_dma(as, addr, pte, attrs);
>
> if (ret != MEMTX_OK) {
> info->type = SMMU_PTW_ERR_WALK_EABT;
> info->addr = addr;
> return -EINVAL;
> }
> - trace_smmu_get_pte(baseaddr, index, addr, *pte);
> + trace_smmu_get_pte(sec_sid, baseaddr, index, addr, *pte);
> return 0;
> }
>
> @@ -543,6 +549,8 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
>
> baseaddr = extract64(tt->ttb, 0, cfg->oas);
> baseaddr &= ~indexmask;
> + int nscfg = tt->nscfg;
> + bool forced_ns = false; /* Track if NSTable=1 forced NS mode */
>
> while (level < VMSA_LEVELS) {
> uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
> @@ -552,7 +560,10 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
> dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
> uint8_t ap;
>
> - if (get_pte(baseaddr, offset, &pte, info)) {
> + /* Use NS if forced by previous NSTable=1 or current nscfg */
> + int current_ns = forced_ns || nscfg;
> + SMMUSecSID sec_sid = current_ns ? SMMU_SEC_SID_NS : SMMU_SEC_SID_S;
> + if (get_pte(baseaddr, offset, &pte, info, sec_sid)) {
> goto error;
> }
> trace_smmu_ptw_level(stage, level, iova, subpage_size,
> @@ -577,6 +588,26 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
> goto error;
> }
> }
> +
> + /*
> + * Hierarchical control of Secure/Non-secure accesses:
> + * If NSTable=1 from Secure space, force all subsequent lookups to
> + * Non-secure space and ignore future NSTable according to
> + * (IHI 0070G.b) 13.4.1 Stage 1 page permissions and
> + * (DDI 0487H.a) D8.4.2 Control of Secure or Non-secure memory access
> + */
> + if (!forced_ns) {
> + int new_nstable = PTE_NSTABLE(pte);
> + if (!current_ns && new_nstable) {
> + /* First transition from Secure to Non-secure */
> + forced_ns = true;
> + nscfg = 1;
> + } else if (!forced_ns) {
> + /* Still in original mode, update nscfg normally */
> + nscfg = new_nstable;
> + }
> + /* If forced_ns is already true, ignore NSTable bit */
> + }
> level++;
> continue;
> } else if (is_page_pte(pte, level)) {
> @@ -619,6 +650,13 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
> goto error;
> }
>
> + tlbe->sec_sid = PTE_NS(pte) ? SMMU_SEC_SID_NS : SMMU_SEC_SID_S;
> + tlbe->entry.target_as = smmu_get_address_space(tlbe->sec_sid);
> + if (!tlbe->entry.target_as) {
> + info->type = SMMU_PTW_ERR_WALK_EABT;
> + info->addr = gpa;
> + goto error;
> + }
> tlbe->entry.translated_addr = gpa;
> tlbe->entry.iova = iova & ~mask;
> tlbe->entry.addr_mask = mask;
> @@ -688,7 +726,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
> dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
> uint8_t s2ap;
>
> - if (get_pte(baseaddr, offset, &pte, info)) {
> + /* Use NS as Secure Stage 2 is not implemented (SMMU_S_IDR1.SEL2 == 0) */
I don't really get this, since you pass sel2 in the cfg?
> + if (get_pte(baseaddr, offset, &pte, info, SMMU_SEC_SID_NS)) {
> goto error;
> }
> trace_smmu_ptw_level(stage, level, ipa, subpage_size,
> @@ -741,6 +780,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
> goto error_ipa;
> }
>
> + tlbe->sec_sid = SMMU_SEC_SID_NS;
> + tlbe->entry.target_as = &address_space_memory;
> tlbe->entry.translated_addr = gpa;
> tlbe->entry.iova = ipa & ~mask;
> tlbe->entry.addr_mask = mask;
> @@ -825,6 +866,17 @@ int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
> return ret;
> }
>
> + if (!cfg->sel2 && tlbe->sec_sid > SMMU_SEC_SID_NS) {
> + /*
> + * Nested translation with Secure IPA output is not supported if
> + * Secure Stage 2 is not implemented.
> + */
> + info->type = SMMU_PTW_ERR_TRANSLATION;
pointer to the spec for TRANSLATION error?
Otherwise looks good
Eric
> + info->stage = SMMU_STAGE_1;
> + tlbe->entry.perm = IOMMU_NONE;
> + return -EINVAL;
> + }
> +
> ipa = CACHED_ENTRY_TO_ADDR(tlbe, iova);
> ret = smmu_ptw_64_s2(cfg, ipa, perm, &tlbe_s2, info);
> if (ret) {
> diff --git a/hw/arm/trace-events b/hw/arm/trace-events
> index 96ebd1b11b..a37e894766 100644
> --- a/hw/arm/trace-events
> +++ b/hw/arm/trace-events
> @@ -16,7 +16,7 @@ smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_
> smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64
> smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64
> smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
> -smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
> +smmu_get_pte(int sec_sid, uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "sec_sid=%d baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
> smmu_iotlb_inv_all(void) "IOTLB invalidate all"
> smmu_iotlb_inv_asid_vmid(int asid, int vmid) "IOTLB invalidate asid=%d vmid=%d"
> smmu_iotlb_inv_vmid(int vmid) "IOTLB invalidate vmid=%d"
Hi Eric,
On 2025/12/2 23:53, Eric Auger wrote:
>
> On 10/12/25 5:12 PM, Tao Tang wrote:
>> Enhance the page table walker to correctly handle secure and non-secure
>> memory accesses. This change introduces logic to select the appropriate
>> address space and enforce architectural security policies during walks.
>>
>> The page table walker now correctly processes Secure Stage 1
>> translations. Key changes include:
>>
>> - The get_pte function now uses the security context to fetch table
>> entries from either the Secure or Non-secure address space.
>>
>> - The stage 1 walker tracks the security state, respecting the NSCFG
>> and NSTable attributes. It correctly handles the hierarchical security
>> model: if a table descriptor in a secure walk has NSTable=1, all
>> subsequent lookups for that walk are forced into the Non-secure space.
>> This is a one-way transition, as specified by the architecture.
>>
>> - A check is added to fault nested translations that produce a Secure
>> IPA when Secure stage 2 is not supported (SMMU_S_IDR1.SEL2 == 0).
>>
>> - The final TLB entry is tagged with the correct output address space,
>> ensuring proper memory isolation.
>>
>> Stage 2 translations are currently limited to Non-secure lookups. Full
>> support for Secure Stage 2 translation will be added in a future series.
>>
>> Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
>> ---
>> hw/arm/smmu-common.c | 64 +++++++++++++++++++++++++++++++++++++++-----
>> hw/arm/trace-events | 2 +-
>> 2 files changed, 59 insertions(+), 7 deletions(-)
>>
>> diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
>> index 5fabe30c75..a092bb5a8d 100644
>> --- a/hw/arm/smmu-common.c
>> +++ b/hw/arm/smmu-common.c
>> @@ -399,20 +399,26 @@ void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
>> * @base_addr[@index]
>> */
>> static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
>> - SMMUPTWEventInfo *info)
>> + SMMUPTWEventInfo *info, SMMUSecSID sec_sid)
>> {
>> int ret;
>> dma_addr_t addr = baseaddr + index * sizeof(*pte);
>> -
>> + MemTxAttrs attrs = smmu_get_txattrs(sec_sid);
>> + AddressSpace *as = smmu_get_address_space(sec_sid);
>> + if (!as) {
>> + info->type = SMMU_PTW_ERR_WALK_EABT;
> Is it WALK_EABT or PERMISSION in that case? I failed to find where this is
> specified in the spec. Could you add a reference?
Maybe this is the same situation I described earlier in the previous
thread [1]? I’m still not confident there is a clear architected
mapping for this condition to a specific PTW event type. Rather than
arbitrarily picking WALK_EABT or PERMISSION, I am leaning towards
treating it as a pure model bug:
I’ll switch this to a g_assert(as) so we don’t report an architected
event for something that should never happen on a correctly wired
machine model.
[1]
https://lore.kernel.org/qemu-devel/e80c6fbc-47a4-490a-8615-be2ee122eb94@phytium.com.cn/
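Concretely, a sketch of what I mean (only the error path changes; the
rest of get_pte() stays as in this patch):

static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
                   SMMUPTWEventInfo *info, SMMUSecSID sec_sid)
{
    int ret;
    dma_addr_t addr = baseaddr + index * sizeof(*pte);
    MemTxAttrs attrs = smmu_get_txattrs(sec_sid);
    AddressSpace *as = smmu_get_address_space(sec_sid);

    /*
     * A NULL AS means the machine model was wired up without the
     * Secure address space. That is a modelling bug, not a
     * guest-visible fault, so assert rather than report an
     * architected PTW event.
     */
    g_assert(as);

    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = ldq_le_dma(as, addr, pte, attrs);
    ...
}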
>> + info->addr = addr;
>> + return -EINVAL;
>> + }
>> /* TODO: guarantee 64-bit single-copy atomicity */
>> - ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);
>> + ret = ldq_le_dma(as, addr, pte, attrs);
>>
>> ------------------------------<snip>------------------------------
>>
>>
>>
>> ------------------------------<snip>------------------------------
>> tlbe->entry.translated_addr = gpa;
>> tlbe->entry.iova = iova & ~mask;
>> tlbe->entry.addr_mask = mask;
>> @@ -688,7 +726,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
>> dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
>> uint8_t s2ap;
>>
>> - if (get_pte(baseaddr, offset, &pte, info)) {
>> + /* Use NS as Secure Stage 2 is not implemented (SMMU_S_IDR1.SEL2 == 0) */
> I don't really get this, since you pass sel2 in the cfg?
In the next revision I’ll simplify the story. SMMUTransCfg will no
longer carry a sel2 field, and this series will explicitly not support
Secure Stage 2. In that context, the Stage-2 PTW will be hard-coded to
use SMMU_SEC_SID_NS. If/when we add SEL2 support in a follow-up series,
we can then drive this from the configuration instead of hard-coding it.
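In other words, the descriptor fetch in smmu_ptw_64_s2() would keep the
hard-coded security ID from this patch, just with the comment reworded
so it no longer hints at a cfg->sel2 field:

    /*
     * This series does not implement Secure Stage 2, so stage 2
     * descriptor fetches always target the Non-secure address space.
     */
    if (get_pte(baseaddr, offset, &pte, info, SMMU_SEC_SID_NS)) {
        goto error;
    }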
>> + if (get_pte(baseaddr, offset, &pte, info, SMMU_SEC_SID_NS)) {
>> goto error;
>> }
>> trace_smmu_ptw_level(stage, level, ipa, subpage_size,
>> @@ -741,6 +780,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
>> goto error_ipa;
>> }
>>
>> + tlbe->sec_sid = SMMU_SEC_SID_NS;
>> + tlbe->entry.target_as = &address_space_memory;
>> tlbe->entry.translated_addr = gpa;
>> tlbe->entry.iova = ipa & ~mask;
>> tlbe->entry.addr_mask = mask;
>> @@ -825,6 +866,17 @@ int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
>> return ret;
>> }
>>
>> + if (!cfg->sel2 && tlbe->sec_sid > SMMU_SEC_SID_NS) {
>> + /*
>> + * Nested translation with Secure IPA output is not supported if
>> + * Secure Stage 2 is not implemented.
>> + */
>> + info->type = SMMU_PTW_ERR_TRANSLATION;
> pointer to the spec for TRANSLATION error?
>
> Otherwise looks good
>
> Eric
After re-reading the spec, I think we should move the check earlier,
when decoding the STE/CD, and use the combination of SMMU_S_IDR1.SEL2,
Config == 0b11x, and the Secure Stream table context to detect an
architecturally illegal nested configuration.
In that case I’ll report a C_BAD_STE-style configuration error and bail
out before running any Secure Stage-1 page walk. That both matches the
spec more closely and avoids doing extra work in this unsupported
configuration.
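For instance, something along these lines in decode_ste() (the
smmuv3_sel2_enabled() accessor and the way sec_sid reaches the decode
path are placeholders for whatever the next revision actually plumbs
through):

    uint32_t config = STE_CONFIG(ste);

    /*
     * A Secure-stream STE with Config == 0b11x (nested) requires
     * Secure Stage 2. With SMMU_S_IDR1.SEL2 == 0 that is an illegal
     * configuration, so raise C_BAD_STE before any Secure Stage 1
     * walk is attempted.
     */
    if (STE_CFG_S1_ENABLED(config) && STE_CFG_S2_ENABLED(config) &&
        sec_sid == SMMU_SEC_SID_S && !smmuv3_sel2_enabled(s)) {
        goto bad_ste;
    }

What do you think?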
Thanks again for your review.
Tao