Introduce a helper function to determine the largest possible mapping
that allows covering a request (or the next part of it that is left to
be processed).
In order not to add yet more recurring dfn_add() / mfn_add() invocations
to the two callers of the new helper, also introduce local variables
holding the values presently operated on.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
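
For illustration only, not part of the patch: the sketch below mirrors the
order computation of mapping_order() outside of Xen, with a hard-coded
page_sizes mask covering 4k/2M/1G (bits 12/21/30), __builtin_ctzl()
standing in for find_first_set_bit(), and made-up dfn/mfn/count sample
values in main(); the hypothetical example_order() is only meant to show
how the helper walks the supported sizes, the real one of course takes its
inputs from hd->platform_ops and the request being processed.

/* Standalone sketch (hypothetical example_order(), not Xen code). */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned int example_order(unsigned long dfn, unsigned long mfn,
                                  unsigned long nr)
{
    unsigned long res = dfn | mfn;
    /* Assumed superpage support: 4k (bit 12), 2M (bit 21), 1G (bit 30). */
    unsigned long sizes = (1UL << 12) | (1UL << 21) | (1UL << 30);
    /* __builtin_ctzl() stands in for Xen's find_first_set_bit(). */
    unsigned int bit = __builtin_ctzl(sizes), order = 0;

    /* The smallest supported size must be the base page size. */
    if ( bit != PAGE_SHIFT )
        return 0;

    while ( (sizes = (sizes >> bit) & ~1UL) )
    {
        unsigned long mask;

        bit = __builtin_ctzl(sizes);
        mask = (1UL << bit) - 1;
        /*
         * Stop once the next larger size would overshoot the remaining
         * count or would not be naturally aligned for both dfn and mfn.
         */
        if ( nr <= mask || (res & mask) )
            break;
        order += bit;
        nr >>= bit;
        res >>= bit;
    }

    return order;
}

int main(void)
{
    /* 2M-aligned dfn/mfn, 1024 4k pages left -> order 9 (one 2M step). */
    printf("%u\n", example_order(0x200, 0xa00, 1024));
    /* mfn not 2M-aligned -> order 0 (fall back to 4k mappings). */
    printf("%u\n", example_order(0x200, 0xa01, 1024));
    /* Fewer than 512 pages left -> order 0 as well. */
    printf("%u\n", example_order(0x200, 0xa00, 100));
    return 0;
}
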
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -260,12 +260,38 @@ void iommu_domain_destroy(struct domain
arch_iommu_domain_destroy(d);
}
-int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
+static unsigned int mapping_order(const struct domain_iommu *hd,
+ dfn_t dfn, mfn_t mfn, unsigned long nr)
+{
+ unsigned long res = dfn_x(dfn) | mfn_x(mfn);
+ unsigned long sizes = hd->platform_ops->page_sizes;
+ unsigned int bit = find_first_set_bit(sizes), order = 0;
+
+ ASSERT(bit == PAGE_SHIFT);
+
+ while ( (sizes = (sizes >> bit) & ~1) )
+ {
+ unsigned long mask;
+
+ bit = find_first_set_bit(sizes);
+ mask = (1UL << bit) - 1;
+ if ( nr <= mask || (res & mask) )
+ break;
+ order += bit;
+ nr >>= bit;
+ res >>= bit;
+ }
+
+ return order;
+}
+
+int iommu_map(struct domain *d, dfn_t dfn0, mfn_t mfn0,
unsigned long page_count, unsigned int flags,
unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
+ unsigned int order;
int rc = 0;
if ( !is_iommu_enabled(d) )
@@ -273,10 +299,16 @@ int iommu_map(struct domain *d, dfn_t df
ASSERT(!IOMMUF_order(flags));
- for ( i = 0; i < page_count; i++ )
+ for ( i = 0; i < page_count; i += 1UL << order )
{
- rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i),
- mfn_add(mfn, i), flags, flush_flags);
+ dfn_t dfn = dfn_add(dfn0, i);
+ mfn_t mfn = mfn_add(mfn0, i);
+ unsigned long j;
+
+ order = mapping_order(hd, dfn, mfn, page_count - i);
+
+ rc = iommu_call(hd->platform_ops, map_page, d, dfn, mfn,
+ flags | IOMMUF_order(order), flush_flags);
if ( likely(!rc) )
continue;
@@ -284,14 +316,18 @@ int iommu_map(struct domain *d, dfn_t df
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU mapping dfn %"PRI_dfn" to mfn %"PRI_mfn" failed: %d\n",
- d->domain_id, dfn_x(dfn_add(dfn, i)),
- mfn_x(mfn_add(mfn, i)), rc);
+ d->domain_id, dfn_x(dfn), mfn_x(mfn), rc);
+
+ for ( j = 0; j < i; j += 1UL << order )
+ {
+ dfn = dfn_add(dfn0, j);
+ order = mapping_order(hd, dfn, _mfn(0), i - j);
- while ( i-- )
/* if statement to satisfy __must_check */
- if ( iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
- 0, flush_flags) )
+ if ( iommu_call(hd->platform_ops, unmap_page, d, dfn, order,
+ flush_flags) )
continue;
+ }
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -322,20 +358,25 @@ int iommu_legacy_map(struct domain *d, d
return rc;
}
-int iommu_unmap(struct domain *d, dfn_t dfn, unsigned long page_count,
+int iommu_unmap(struct domain *d, dfn_t dfn0, unsigned long page_count,
unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
+ unsigned int order;
int rc = 0;
if ( !is_iommu_enabled(d) )
return 0;
- for ( i = 0; i < page_count; i++ )
+ for ( i = 0; i < page_count; i += 1UL << order )
{
- int err = iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
- 0, flush_flags);
+ dfn_t dfn = dfn_add(dfn0, i);
+ int err;
+
+ order = mapping_order(hd, dfn, _mfn(0), page_count - i);
+ err = iommu_call(hd->platform_ops, unmap_page, d, dfn,
+ order, flush_flags);
if ( likely(!err) )
continue;
@@ -343,7 +384,7 @@ int iommu_unmap(struct domain *d, dfn_t
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"d%d: IOMMU unmapping dfn %"PRI_dfn" failed: %d\n",
- d->domain_id, dfn_x(dfn_add(dfn, i)), err);
+ d->domain_id, dfn_x(dfn), err);
if ( !rc )
rc = err;