In preparation for adding stage-2 support, add stage-2 PTW code.
Only the AArch64 format is supported, as with stage-1.
Nesting stage-1 and stage-2 is not supported right now.
HTTU is not supported; SW is expected to maintain the Access flag.
This is described in the SMMUv3 manual (IHI 0070.E.a),
"5.2 Stream Table Entry", field "[181] S2AFFD".
This flag determines the behavior on access of a stage-2 page whose
descriptor has AF == 0:
- 0b0: An Access flag fault occurs (stall not supported).
- 0b1: An Access flag fault never occurs.
An Access fault takes priority over a Permission fault.
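As a quick illustration, the check ends up looking roughly like the
snippet below (a minimal sketch based on the PTE_AF macro and the
s2cfg.affd field added by this patch, not the exact committed code):

    /* AF == 0 and S2AFFD == 0: raise an Access flag fault. */
    if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
        info->type = SMMU_PTW_ERR_ACCESS;
        goto error;
    }
    /* Stage-2 permissions are only checked after this point. */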
There are 3 address size checks for stage-2, according to
(IHI 0070.E.a) "3.4 Address sizes"; they are summarized in the sketch
after this list.
- As nesting is not supported, the input address is passed directly to
stage-2 and is checked against the IAS.
We use cfg->oas to hold the OAS when stage-1 is not used; this is set
in the next patch.
This check is done outside of smmu_ptw_64_s2 as it is not part of
stage-2 (it raises a stage-1 fault), and the stage-2 function shouldn't
change its behavior when nesting is supported.
When nesting is supported and we figure out how to combine the TLBs
for stage-1 and stage-2, we can move this check into the stage-1
function, as described in the ARM DDI0487I.a pseudocode:
aarch64/translation/vmsa_translation/AArch64.S1Translate
aarch64/translation/vmsa_translation/AArch64.S1DisabledOutput
- The input to stage-2 is checked against s2t0sz, and a stage-2
Translation fault is raised if it exceeds it.
- The output of stage-2 is checked against the effective PA output
range.
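As referenced above, the three checks boil down to something like this
(a simplified sketch of the code in this patch, not a drop-in snippet):

    /* 1) Stage-1 bypassed/disabled: IOVA checked against IAS (== OAS). */
    if (iova >= (1ULL << cfg->oas)) {
        /* stage-1 Address Size fault */
    }
    /* 2) IPA checked against the input range configured by S2T0SZ. */
    if (ipa >= (1ULL << (64 - cfg->s2cfg.tsz))) {
        /* stage-2 Translation fault */
    }
    /* 3) Output PA checked against the effective PA output range. */
    if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
        /* stage-2 Address Size fault */
    }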
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
Changes in v4:
- Collected Reviewed-by tag
- s/IHI 0070.E/IHI 0070.E.a
Changes in v3:
- Fix IPA address size check.
- s2cfg.oas renamed to s2cfg.eff_ps.
- s/iova/ipa
- s/ap/s2ap
- s/gran/granule_sz
- use level_shift instead of inline code.
- Add missing brackets in is_permission_fault_s2.
- Use new VMSA_* macros and functions instead of SMMU_*
- Rename pgd_idx to pgd_concat_idx.
- Move SMMU_MAX_S2_CONCAT to STE patch as it is not used here.
Changes in v2:
- Squash S2AFF PTW code.
- Use common functions between stage-1 and stage-2.
- Add checks for IPA and out PA.
---
hw/arm/smmu-common.c | 142 ++++++++++++++++++++++++++++++++++++++++-
hw/arm/smmu-internal.h | 35 ++++++++++
2 files changed, 176 insertions(+), 1 deletion(-)
diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c
index 50391a8c94..3e82eab741 100644
--- a/hw/arm/smmu-common.c
+++ b/hw/arm/smmu-common.c
@@ -363,6 +363,127 @@ error:
return -EINVAL;
}
+/**
+ * smmu_ptw_64_s2 - VMSAv8-64 Walk of the page tables for a given ipa
+ * for stage-2.
+ * @cfg: translation config
+ * @ipa: ipa to translate
+ * @perm: access type
+ * @tlbe: SMMUTLBEntry (out)
+ * @info: handle to an error info
+ *
+ * Return 0 on success, < 0 on error. In case of error, @info is filled
+ * and tlbe->perm is set to IOMMU_NONE.
+ * Upon success, @tlbe is filled with translated_addr and entry
+ * permission rights.
+ */
+static int smmu_ptw_64_s2(SMMUTransCfg *cfg,
+ dma_addr_t ipa, IOMMUAccessFlags perm,
+ SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
+{
+ const int stage = 2;
+ int granule_sz = cfg->s2cfg.granule_sz;
+ /* ARM DDI0487I.a: Table D8-7. */
+ int inputsize = 64 - cfg->s2cfg.tsz;
+ int level = get_start_level(cfg->s2cfg.sl0, granule_sz);
+ int stride = VMSA_STRIDE(granule_sz);
+ int idx = pgd_concat_idx(level, granule_sz, ipa);
+ /*
+ * Get the ttb from concatenated structure.
+ * The offset is the idx * size of each ttb (number of ptes * sizeof(pte)).
+ */
+ uint64_t baseaddr = extract64(cfg->s2cfg.vttb, 0, 48) + (1 << stride) *
+ idx * sizeof(uint64_t);
+ dma_addr_t indexmask = VMSA_IDXMSK(inputsize, stride, level);
+
+ baseaddr &= ~indexmask;
+
+ /*
+ * On input, a stage 2 Translation fault occurs if the IPA is outside the
+ * range configured by the relevant S2T0SZ field of the STE.
+ */
+ if (ipa >= (1ULL << inputsize)) {
+ info->type = SMMU_PTW_ERR_TRANSLATION;
+ goto error;
+ }
+
+ while (level < VMSA_LEVELS) {
+ uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
+ uint64_t mask = subpage_size - 1;
+ uint32_t offset = iova_level_offset(ipa, inputsize, level, granule_sz);
+ uint64_t pte, gpa;
+ dma_addr_t pte_addr = baseaddr + offset * sizeof(pte);
+ uint8_t s2ap;
+
+ if (get_pte(baseaddr, offset, &pte, info)) {
+ goto error;
+ }
+ trace_smmu_ptw_level(stage, level, ipa, subpage_size,
+ baseaddr, offset, pte);
+ if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
+ trace_smmu_ptw_invalid_pte(stage, level, baseaddr,
+ pte_addr, offset, pte);
+ break;
+ }
+
+ if (is_table_pte(pte, level)) {
+ baseaddr = get_table_pte_address(pte, granule_sz);
+ level++;
+ continue;
+ } else if (is_page_pte(pte, level)) {
+ gpa = get_page_pte_address(pte, granule_sz);
+ trace_smmu_ptw_page_pte(stage, level, ipa,
+ baseaddr, pte_addr, pte, gpa);
+ } else {
+ uint64_t block_size;
+
+ gpa = get_block_pte_address(pte, level, granule_sz,
+ &block_size);
+ trace_smmu_ptw_block_pte(stage, level, baseaddr,
+ pte_addr, pte, ipa, gpa,
+ block_size >> 20);
+ }
+
+ /*
+ * If S2AFFD and PTE.AF are 0 => fault. (5.2. Stream Table Entry)
+ * An Access fault takes priority over a Permission fault.
+ */
+ if (!PTE_AF(pte) && !cfg->s2cfg.affd) {
+ info->type = SMMU_PTW_ERR_ACCESS;
+ goto error;
+ }
+
+ s2ap = PTE_AP(pte);
+ if (is_permission_fault_s2(s2ap, perm)) {
+ info->type = SMMU_PTW_ERR_PERMISSION;
+ goto error;
+ }
+
+ /*
+ * The address output from the translation causes a stage 2 Address
+ * Size fault if it exceeds the effective PA output range.
+ */
+ if (gpa >= (1ULL << cfg->s2cfg.eff_ps)) {
+ info->type = SMMU_PTW_ERR_ADDR_SIZE;
+ goto error;
+ }
+
+ tlbe->entry.translated_addr = gpa;
+ tlbe->entry.iova = ipa & ~mask;
+ tlbe->entry.addr_mask = mask;
+ tlbe->entry.perm = s2ap;
+ tlbe->level = level;
+ tlbe->granule = granule_sz;
+ return 0;
+ }
+ info->type = SMMU_PTW_ERR_TRANSLATION;
+
+error:
+ info->stage = 2;
+ tlbe->entry.perm = IOMMU_NONE;
+ return -EINVAL;
+}
+
/**
* smmu_ptw - Walk the page tables for an IOVA, according to @cfg
*
@@ -377,7 +498,26 @@ error:
int smmu_ptw(SMMUTransCfg *cfg, dma_addr_t iova, IOMMUAccessFlags perm,
SMMUTLBEntry *tlbe, SMMUPTWEventInfo *info)
{
- return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info);
+ if (cfg->stage == 1) {
+ return smmu_ptw_64_s1(cfg, iova, perm, tlbe, info);
+ } else if (cfg->stage == 2) {
+ /*
+ * If bypassing stage 1 (or unimplemented), the input address is passed
+ * directly to stage 2 as IPA. If the input address of a transaction
+ * exceeds the size of the IAS, a stage 1 Address Size fault occurs.
+ * For AA64, IAS = OAS according to (IHI 0070.E.a) "3.4 Address sizes"
+ */
+ if (iova >= (1ULL << cfg->oas)) {
+ info->type = SMMU_PTW_ERR_ADDR_SIZE;
+ info->stage = 1;
+ tlbe->entry.perm = IOMMU_NONE;
+ return -EINVAL;
+ }
+
+ return smmu_ptw_64_s2(cfg, iova, perm, tlbe, info);
+ }
+
+ g_assert_not_reached();
}
/**
diff --git a/hw/arm/smmu-internal.h b/hw/arm/smmu-internal.h
index 2d75b31953..a9454f914e 100644
--- a/hw/arm/smmu-internal.h
+++ b/hw/arm/smmu-internal.h
@@ -66,6 +66,8 @@
#define PTE_APTABLE(pte) \
(extract64(pte, 61, 2))
+#define PTE_AF(pte) \
+ (extract64(pte, 10, 1))
/*
* TODO: At the moment all transactions are considered as privileged (EL1)
* as IOMMU translation callback does not pass user/priv attributes.
@@ -73,6 +75,9 @@
#define is_permission_fault(ap, perm) \
(((perm) & IOMMU_WO) && ((ap) & 0x2))
+#define is_permission_fault_s2(s2ap, perm) \
+ (!(((s2ap) & (perm)) == (perm)))
+
#define PTE_AP_TO_PERM(ap) \
(IOMMU_ACCESS_FLAG(true, !((ap) & 0x2)))
@@ -96,6 +101,36 @@ uint64_t iova_level_offset(uint64_t iova, int inputsize,
MAKE_64BIT_MASK(0, gsz - 3);
}
+/* FEAT_LPA2 and FEAT_TTST are not implemented. */
+static inline int get_start_level(int sl0, int granule_sz)
+{
+ /* ARM DDI0487I.a: Table D8-12. */
+ if (granule_sz == 12) {
+ return 2 - sl0;
+ }
+ /* ARM DDI0487I.a: Table D8-22 and Table D8-31. */
+ return 3 - sl0;
+}
+
+/*
+ * Index in a concatenated first level stage-2 page table.
+ * ARM DDI0487I.a: D8.2.2 Concatenated translation tables.
+ */
+static inline int pgd_concat_idx(int start_level, int granule_sz,
+ dma_addr_t ipa)
+{
+ uint64_t ret;
+ /*
+ * Get the number of bits handled by next levels, then any extra bits in
+ * the address should index the concatenated tables. This relation can be
+ * deduced from tables in ARM DDI0487I.a: D8.2.7-9
+ */
+ int shift = level_shift(start_level - 1, granule_sz);
+
+ ret = ipa >> shift;
+ return ret;
+}
+
#define SMMU_IOTLB_ASID(key) ((key).asid)
typedef struct SMMUIOTLBPageInvInfo {
--
2.40.1.606.ga4b1b128d6-goog