The alignment only needs to be checked once, in the upper-level caller.
If the addresses and sizes pass the check there, they will certainly pass
the same test in etnaviv_context_unmap().

Therefore, remove the redundant alignment tests from etnaviv_context_map()
and etnaviv_context_unmap(), and do the check in the upper-level caller
etnaviv_iommu_map() instead. This removes a small amount of overhead.
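To illustrate the idea outside the kernel (a minimal standalone sketch with
hypothetical names, not the etnaviv code): the alignment is validated once at
the single entry point, so the internal helpers can rely on already-aligned
arguments.

	/* Sketch only: hypothetical names, userspace C for illustration. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define PAGE_SIZE	4096u
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	/* Internal helper: relies on the caller having checked alignment. */
	static void context_unmap(uintptr_t iova, size_t size)
	{
		while (size) {
			/* unmap one page at a time */
			iova += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}

	/* Single entry point: the only place that needs the alignment check. */
	static int iommu_map(uintptr_t iova, uintptr_t pa, size_t size)
	{
		if (!IS_ALIGNED(iova | pa | size, PAGE_SIZE))
			return -1;	/* reject unaligned requests once */

		/* ... map pages; on failure, unwind what was mapped ... */
		context_unmap(iova, size);	/* no second alignment check needed */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", iommu_map(0x1000, 0x2000, 2 * PAGE_SIZE));	/* 0 */
		printf("%d\n", iommu_map(0x1001, 0x2000, PAGE_SIZE));		/* -1 */
		return 0;
	}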
Signed-off-by: Sui Jingfeng <sui.jingfeng@linux.dev>
---
drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 05021848126e..56ae2de76e15 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -19,12 +19,6 @@ static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
size_t unmapped_page, unmapped = 0;
size_t pgsize = SZ_4K;
- if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
- iova, size, pgsize);
- return;
- }
-
while (unmapped < size) {
unmapped_page = context->global->ops->unmap(context, iova,
pgsize);
@@ -45,12 +39,6 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
size_t orig_size = size;
int ret = 0;
- if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
- iova, &paddr, size, pgsize);
- return -EINVAL;
- }
-
while (size) {
ret = context->global->ops->map(context, iova, paddr, pgsize,
prot);
@@ -88,6 +76,14 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);
+ if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
+ dev_err(context->global->dev,
+ "unaligned: iova 0x%x pa %pa size 0x%x\n",
+ iova, &pa, bytes);
+ ret = -EINVAL;
+ goto fail;
+ }
+
ret = etnaviv_context_map(context, da, pa, bytes, prot);
if (ret)
goto fail;
--
2.34.1