Per the specification, and as observed in hardware, the SMMUv3 aligns
the SMMU_STRTAB_BASE address to the size of the table by masking out the
respective least significant bits in the ADDR field.
Apply this masking logic to our smmu_find_ste() lookup function per the
specification.
ref. ARM IHI 0070C, section 6.3.23.
Signed-off-by: Simon Veith <sveith@amazon.de>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org
---
hw/arm/smmuv3.c | 29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
index aad4639..2d6c275 100644
--- a/hw/arm/smmuv3.c
+++ b/hw/arm/smmuv3.c
@@ -376,8 +376,9 @@ bad_ste:
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
SMMUEventInfo *event)
{
- dma_addr_t addr;
+ dma_addr_t addr, strtab_base;
uint32_t log2size;
+ int strtab_size_shift;
int ret;
trace_smmuv3_find_ste(sid, s->features, s->sid_split);
@@ -391,10 +392,23 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
}
if (s->features & SMMU_FEATURE_2LVL_STE) {
int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
- dma_addr_t strtab_base, l1ptr, l2ptr;
+ dma_addr_t l1ptr, l2ptr;
STEDesc l1std;
- strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
+ /*
+ * Align strtab base address to table size. For this purpose, assume it
+ * is not bounded by SMMU_IDR1_SIDSIZE.
+ */
+ strtab_size_shift = log2size - s->sid_split - 1 + 3;
+ if (strtab_size_shift < DMA_ADDR_BITS) {
+ if (strtab_size_shift < 5) {
+ strtab_size_shift = 5;
+ }
+ strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
+ ~((1ULL << strtab_size_shift) - 1);
+ } else {
+ strtab_base = 0;
+ }
l1_ste_offset = sid >> s->sid_split;
l2_ste_offset = sid & ((1 << s->sid_split) - 1);
l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
@@ -433,7 +447,14 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
}
addr = l2ptr + l2_ste_offset * sizeof(*ste);
} else {
- addr = (s->strtab_base & SMMU_BASE_ADDR_MASK) + sid * sizeof(*ste);
+ strtab_size_shift = log2size + 5;
+ if (strtab_size_shift < DMA_ADDR_BITS) {
+ strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
+ ~((1ULL << strtab_size_shift) - 1);
+ } else {
+ strtab_base = 0;
+ }
+ addr = strtab_base + sid * sizeof(*ste);
}
if (smmu_get_ste(s, addr, ste, event)) {
--
2.7.4
Hi Simon,
On 12/4/19 2:55 PM, Simon Veith wrote:
> Per the specification, and as observed in hardware, the SMMUv3 aligns
> the SMMU_STRTAB_BASE address to the size of the table by masking out the
> respective least significant bits in the ADDR field.
>
> Apply this masking logic to our smmu_find_ste() lookup function per the
> specification.
>
> ref. ARM IHI 0070C, section 6.3.23.
>
> Signed-off-by: Simon Veith <sveith@amazon.de>
> Cc: Eric Auger <eric.auger@redhat.com>
> Cc: qemu-devel@nongnu.org
> Cc: qemu-arm@nongnu.org
> ---
> hw/arm/smmuv3.c | 29 +++++++++++++++++++++++++----
> 1 file changed, 25 insertions(+), 4 deletions(-)
>
> diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c
> index aad4639..2d6c275 100644
> --- a/hw/arm/smmuv3.c
> +++ b/hw/arm/smmuv3.c
> @@ -376,8 +376,9 @@ bad_ste:
> static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
> SMMUEventInfo *event)
> {
> - dma_addr_t addr;
> + dma_addr_t addr, strtab_base;
> uint32_t log2size;
> + int strtab_size_shift;
> int ret;
>
> trace_smmuv3_find_ste(sid, s->features, s->sid_split);
> @@ -391,10 +392,23 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
> }
> if (s->features & SMMU_FEATURE_2LVL_STE) {
> int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
> - dma_addr_t strtab_base, l1ptr, l2ptr;
> + dma_addr_t l1ptr, l2ptr;
> STEDesc l1std;
>
> - strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
> + /*
> + * Align strtab base address to table size. For this purpose, assume it
> + * is not bounded by SMMU_IDR1_SIDSIZE.
> + */
> + strtab_size_shift = log2size - s->sid_split - 1 + 3;
Maybe, just as the spec does, use MAX(5, (LOG2SIZE - SPLIT - 1 + 3))?
> + if (strtab_size_shift < DMA_ADDR_BITS) {
> + if (strtab_size_shift < 5) {
> + strtab_size_shift = 5;
> + }
> + strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
> + ~((1ULL << strtab_size_shift) - 1);
nit: you may use ~MAKE_64BIT_MASK(0, strtab_size_shift)
> + } else {
see below
> + strtab_base = 0;
> + }
> l1_ste_offset = sid >> s->sid_split;
> l2_ste_offset = sid & ((1 << s->sid_split) - 1);
> l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
> @@ -433,7 +447,14 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
> }
> addr = l2ptr + l2_ste_offset * sizeof(*ste);
> } else {
> - addr = (s->strtab_base & SMMU_BASE_ADDR_MASK) + sid * sizeof(*ste);
> + strtab_size_shift = log2size + 5;
> + if (strtab_size_shift < DMA_ADDR_BITS) {
> + strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
> + ~((1ULL << strtab_size_shift) - 1);
> + } else {
Can this actually happen? My understanding is that LOG2SIZE <= SMMU_IDR1.SIDSIZE,
and SIDSIZE is at most 32. Does the same apply above?
> + strtab_base = 0;
> + }
> + addr = strtab_base + sid * sizeof(*ste);
> }
>
> if (smmu_get_ste(s, addr, ste, event)) {
>
Thank you for this series of fixes.
Best Regards
Eric
© 2016 - 2026 Red Hat, Inc.