Introduce the logic needed to handle security state during page
table walks.
Support for the Non-secure attribute bits is added to the parsing of
the translation structures, namely NSCFG in the CD and NS/NSTable in
the PTEs. This allows the SMMU model to determine the security
properties both of the memory accesses performed during a walk and of
the resulting translation.
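For reference, the bit positions decoded by this change (mirroring the
macros added in the hunks below) are:

  NSCFG:   CD word[2] bit 0 (TT0) / CD word[4] bit 0 (TT1)
  NS:      bit 5 of a page/block descriptor
  NSTable: bit 63 of a table descriptor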
With this change, a new translation flow is supported:
- Secure Stage 1 translation
Note that this commit does not add support for Secure Stage 2
translation (SMMU_S_IDR1.SEL2 == 0); that will be addressed in a
future patch. A nested configuration whose Secure Stage 1 output
would need to be translated by Stage 2 is therefore rejected with a
translation fault.
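The hierarchical NSTable handling in smmu_ptw_64_s1() can be
summarised by the following sketch (simplified pseudo-C, not the
literal code in the hunk below):

    walk_ns = forced_ns || nscfg;
    get_pte(..., walk_ns);           /* fetch from NS or S PA space */
    if (is_table_pte(pte, level) && !forced_ns) {
        if (!walk_ns && PTE_NSTABLE(pte)) {
            /* Secure -> Non-secure switch is sticky for this walk */
            forced_ns = true;
            nscfg = 1;
        } else {
            nscfg = PTE_NSTABLE(pte);
        }
    }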
Signed-off-by: Tao Tang <tangtao1634@phytium.com.cn>
---
hw/arm/smmu-common.c | 55 ++++++++++++++++++++++++++++++++----
hw/arm/smmu-internal.h | 7 +++++
hw/arm/smmuv3-internal.h | 2 ++
hw/arm/smmuv3.c | 2 ++
hw/arm/trace-events | 2 +-
include/hw/arm/smmu-common.h | 4 +++
6 files changed, 66 insertions(+), 6 deletions(-)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index bc13b00f1d..f563cba023 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -398,20 +398,25 @@ void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid)
* @base_addr[@index]
*/
static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte,
- SMMUPTWEventInfo *info)
+ SMMUPTWEventInfo *info, SMMUTransCfg *cfg, int walk_ns)
{
int ret;
dma_addr_t addr = baseaddr + index * sizeof(*pte);
+ /* Only NS and Secure PA spaces are supported; RME isn't implemented yet */
+ MemTxAttrs attrs =
+ smmu_get_txattrs(walk_ns ? SMMU_SEC_IDX_NS : SMMU_SEC_IDX_S);
+ AddressSpace *as =
+ smmu_get_address_space(walk_ns ? SMMU_SEC_IDX_NS : SMMU_SEC_IDX_S);
/* TODO: guarantee 64-bit single-copy atomicity */
- ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);
+ ret = ldq_le_dma(as, addr, pte, attrs);
if (ret != MEMTX_OK) {
info->type = SMMU_PTW_ERR_WALK_EABT;
info->addr = addr;
return -EINVAL;
}
- trace_smmu_get_pte(baseaddr, index, addr, *pte);
+ trace_smmu_get_pte(baseaddr, index, addr, *pte, walk_ns);
return 0;
}
@@ -542,6 +547,8 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
baseaddr = extract64(tt->ttb, 0, cfg->oas);
baseaddr &= ~indexmask;
+ int nscfg = tt->nscfg;
+ bool forced_ns = false; /* Track if NSTable=1 forced NS mode */
while (level < VMSA_LEVELS) {
uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
@@ -551,7 +558,9 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
uint8_t ap;
- if (get_pte(baseaddr, offset, &pte, info)) {
+ /* Use NS if forced by previous NSTable=1 or current nscfg */
+ int current_ns = forced_ns || nscfg;
+ if (get_pte(baseaddr, offset, &pte, info, cfg, current_ns)) {
goto error;
}
trace_smmu_ptw_level(stage, level, iova, subpage_size,
@@ -576,6 +585,26 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
goto error;
}
}
+
+ /*
+ * Hierarchical control of Secure/Non-secure accesses:
+ * If NSTable=1 from Secure space, force all subsequent lookups to
+ * Non-secure space and ignore future NSTable according to
+ * (IHI 0070G.b) 13.4.1 Stage 1 page permissions and
+ * (DDI 0487H.a)D8.4.2 Control of Secure or Non-secure memory access
+ */
+ if (!forced_ns) {
+ int new_nstable = PTE_NSTABLE(pte);
+ if (!current_ns && new_nstable) {
+ /* First transition from Secure to Non-secure */
+ forced_ns = true;
+ nscfg = 1;
+ } else {
+ /* Still in original mode, update nscfg normally */
+ nscfg = new_nstable;
+ }
+ /* If forced_ns is already true, ignore NSTable bit */
+ }
level++;
continue;
} else if (is_page_pte(pte, level)) {
@@ -618,6 +647,8 @@ static int smmu_ptw_64_s1(SMMUState *bs, SMMUTransCfg *cfg,
goto error;
}
+ tlbe->sec_idx = PTE_NS(pte) ? SMMU_SEC_IDX_NS : SMMU_SEC_IDX_S;
+ tlbe->entry.target_as = smmu_get_address_space(tlbe->sec_idx);
tlbe->entry.translated_addr = gpa;
tlbe->entry.iova = iova & ~mask;
tlbe->entry.addr_mask = mask;
@@ -687,7 +718,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
uint8_t s2ap;
- if (get_pte(baseaddr, offset, &pte, info)) {
+ /* Use NS as Secure Stage 2 is not implemented (SMMU_S_IDR1.SEL2 == 0) */
+ if (get_pte(baseaddr, offset, &pte, info, cfg, 1)) {
goto error;
}
trace_smmu_ptw_level(stage, level, ipa, subpage_size,
@@ -740,6 +772,8 @@ static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
goto error_ipa;
}
+ tlbe->sec_idx = SMMU_SEC_IDX_NS;
+ tlbe->entry.target_as = &address_space_memory;
tlbe->entry.translated_addr = gpa;
tlbe->entry.iova = ipa & ~mask;
tlbe->entry.addr_mask = mask;
@@ -824,6 +858,17 @@ int smmu_ptw(SMMUState *bs, SMMUTransCfg *cfg, dma_addr_t iova,
return ret;
}
+ if (!cfg->sel2 && tlbe->sec_idx > SMMU_SEC_IDX_NS) {
+ /*
+ * Nested translation with Secure IPA output is not supported if
+ * Secure Stage 2 is not implemented.
+ */
+ info->type = SMMU_PTW_ERR_TRANSLATION;
+ info->stage = SMMU_STAGE_1;
+ tlbe->entry.perm = IOMMU_NONE;
+ return -EINVAL;
+ }
+
ipa = CACHED_ENTRY_TO_ADDR(tlbe, iova);
ret = smmu_ptw_64_s2(cfg, ipa, perm, &tlbe_s2, info);
if (ret) {
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
index d143d296f3..cb3a6eb8d1 100644
--- a/hw/arm/smmu-internal.h
+++ b/hw/arm/smmu-internal.h
@@ -58,6 +58,10 @@
((level == 3) && \
((pte & ARM_LPAE_PTE_TYPE_MASK) == ARM_LPAE_L3_PTE_TYPE_PAGE))
+/* Non-secure bit */
+#define PTE_NS(pte) \
+ (extract64(pte, 5, 1))
+
/* access permissions */
#define PTE_AP(pte) \
@@ -66,6 +70,9 @@
#define PTE_APTABLE(pte) \
(extract64(pte, 61, 2))
+#define PTE_NSTABLE(pte) \
+ (extract64(pte, 63, 1))
+
#define PTE_AF(pte) \
(extract64(pte, 10, 1))
/*
diff --git a/hw/arm/smmuv3-internal.h b/hw/arm/smmuv3-internal.h
index cf17c405de..af2936cf16 100644
--- a/hw/arm/smmuv3-internal.h
+++ b/hw/arm/smmuv3-internal.h
@@ -704,6 +704,8 @@ static inline int oas2bits(int oas_field)
#define CD_R(x) extract32((x)->word[1], 13, 1)
#define CD_A(x) extract32((x)->word[1], 14, 1)
#define CD_AARCH64(x) extract32((x)->word[1], 9 , 1)
+#define CD_NSCFG0(x) extract32((x)->word[2], 0, 1)
+#define CD_NSCFG1(x) extract32((x)->word[4], 0, 1)
/**
* tg2granule - Decodes the CD translation granule size field according
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index eba709ae2b..2f8494c346 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -832,6 +832,7 @@ static int decode_cd(SMMUv3State *s, SMMUTransCfg *cfg,
tt->ttb = CACHED_ENTRY_TO_ADDR(entry, tt->ttb);
}
+ tt->nscfg = i ? CD_NSCFG1(cd) : CD_NSCFG0(cd);
tt->had = CD_HAD(cd, i);
trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
}
@@ -929,6 +930,7 @@ static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event,
cfg->sec_idx = sec_idx;
cfg->txattrs = smmu_get_txattrs(sec_idx);
cfg->as = smmu_get_address_space(sec_idx);
+ cfg->sel2 = extract32(s->bank[SMMU_SEC_IDX_S].idr[1], 29, 1); /* SEL2 */
if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
SMMUConfigKey *persistent_key = g_new(SMMUConfigKey, 1);
diff --git a/hw/arm/trace-events b/hw/arm/trace-events
index 80cb4d6b04..f99de78655 100644
--- a/hw/arm/trace-events
+++ b/hw/arm/trace-events
@@ -16,7 +16,7 @@ smmu_ptw_level(int stage, int level, uint64_t iova, size_t subpage_size, uint64_
smmu_ptw_invalid_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint32_t offset, uint64_t pte) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" offset=%d pte=0x%"PRIx64
smmu_ptw_page_pte(int stage, int level, uint64_t iova, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t address) "stage=%d level=%d iova=0x%"PRIx64" base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" page address = 0x%"PRIx64
smmu_ptw_block_pte(int stage, int level, uint64_t baseaddr, uint64_t pteaddr, uint64_t pte, uint64_t iova, uint64_t gpa, int bsize_mb) "stage=%d level=%d base@=0x%"PRIx64" pte@=0x%"PRIx64" pte=0x%"PRIx64" iova=0x%"PRIx64" block address = 0x%"PRIx64" block size = %d MiB"
-smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64
+smmu_get_pte(uint64_t baseaddr, int index, uint64_t pteaddr, uint64_t pte, bool ns_walk) "baseaddr=0x%"PRIx64" index=0x%x, pteaddr=0x%"PRIx64", pte=0x%"PRIx64" ns_walk=%d"
smmu_iotlb_inv_all(void) "IOTLB invalidate all"
smmu_iotlb_inv_asid_vmid(int asid, int vmid) "IOTLB invalidate asid=%d vmid=%d"
smmu_iotlb_inv_vmid(int vmid) "IOTLB invalidate vmid=%d"
diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h
index ed21db7728..c27aec8bd4 100644
--- a/include/hw/arm/smmu-common.h
+++ b/include/hw/arm/smmu-common.h
@@ -109,6 +109,7 @@ typedef struct SMMUTransTableInfo {
uint8_t tsz; /* input range, ie. 2^(64 -tsz)*/
uint8_t granule_sz; /* granule page shift */
bool had; /* hierarchical attribute disable */
+ bool nscfg; /* non-secure attribute of the starting-level TT */
} SMMUTransTableInfo;
typedef struct SMMUTLBEntry {
@@ -116,6 +117,7 @@ typedef struct SMMUTLBEntry {
uint8_t level;
uint8_t granule;
IOMMUAccessFlags parent_perm;
+ SMMUSecurityIndex sec_idx;
} SMMUTLBEntry;
/* Stage-2 configuration. */
@@ -156,6 +158,8 @@ typedef struct SMMUTransCfg {
SMMUSecurityIndex sec_idx; /* cached security index */
MemTxAttrs txattrs; /* cached transaction attributes */
AddressSpace *as; /* cached address space */
+ bool current_walk_ns; /* cached Non-secure flag for the current walk */
+ bool sel2; /* Secure Stage 2 support (SMMU_S_IDR1.SEL2) */
} SMMUTransCfg;
typedef struct SMMUDevice {
--
2.34.1