The new address_space_translate_iommu() helper will be used to
process IOMMUs in a MemoryRegionCache. This also includes a small
bugfix, in that the returned page_mask is now correctly -1 if the
IOMMU memory region maps the entire address space directly.
Previously, address_space_get_iotlb_entry would return
~TARGET_PAGE_MASK in that case.
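
For illustration only (not part of this patch): with the new semantics,
a caller such as address_space_get_iotlb_entry() can use the returned
page mask directly as the addr_mask of an IOMMUTLBEntry. A minimal
sketch, assuming the full flatview_do_translate() prototype
(fv, addr, xlat, plen_out, page_mask_out, is_write, is_mmio, target_as),
which is only partially visible in the hunk below; "entry" is a
hypothetical local IOMMUTLBEntry:

    hwaddr xlat, page_mask;
    MemoryRegionSection section;

    section = flatview_do_translate(address_space_to_flatview(as), addr,
                                    &xlat, NULL, &page_mask,
                                    is_write, false, &as);

    /* page_mask is (hwaddr)-1 when the IOMMU maps the whole address
     * space directly; otherwise it is the IOMMU page mask, which may
     * describe a huge page.  Either way it is usable as addr_mask. */
    entry.iova = addr & ~page_mask;
    entry.addr_mask = page_mask;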
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
exec.c | 113 +++++++++++++++++++++++++++++++++++++++------------------
1 file changed, 78 insertions(+), 35 deletions(-)
diff --git a/exec.c b/exec.c
index 1dea3e9c95..868f89c345 100644
--- a/exec.c
+++ b/exec.c
@@ -461,6 +461,72 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
return section;
}
+/**
+ * address_space_translate_iommu - translate an address through an IOMMU
+ * memory region and then through the target address space.
+ *
+ * @iommu_mr: the IOMMU memory region that we start the translation from
+ * @xlat: on entry, the address to be translated through the IOMMU;
+ * on return, the translated address offset within the destination
+ * memory region. It cannot be %NULL.
+ * @plen_out: valid read/write length of the translated address. It
+ * cannot be %NULL.
+ * @page_mask_out: page mask for the translated address. This is
+ * only meaningful for IOMMU-translated addresses, where it can
+ * reflect huge pages or a full identity mapping ((hwaddr)-1).
+ * It can be %NULL if we don't care about it.
+ * @is_write: whether the translation operation is for write
+ * @is_mmio: whether this can be MMIO, set true if it can
+ * @target_as: the address space targeted by the IOMMU
+ *
+ * This function is called from RCU critical section.
+ */
+static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
+ hwaddr *xlat,
+ hwaddr *plen_out,
+ hwaddr *page_mask_out,
+ bool is_write,
+ bool is_mmio,
+ AddressSpace **target_as)
+{
+ MemoryRegionSection *section;
+ hwaddr page_mask = (hwaddr)-1;
+
+ do {
+ hwaddr addr = *xlat;
+ IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
+ IOMMUTLBEntry iotlb = imrc->translate(iommu_mr, addr, is_write ?
+ IOMMU_WO : IOMMU_RO);
+
+ if (!(iotlb.perm & (1 << is_write))) {
+ goto unassigned;
+ }
+
+ addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+ | (addr & iotlb.addr_mask));
+ page_mask &= iotlb.addr_mask;
+ *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
+ *target_as = iotlb.target_as;
+
+ section = address_space_translate_internal(
+ address_space_to_dispatch(iotlb.target_as), addr, xlat,
+ plen_out, is_mmio);
+ if (!section) {
+ goto unassigned;
+ }
+
+ iommu_mr = memory_region_get_iommu(section->mr);
+ } while (iommu_mr);
+
+ if (page_mask_out) {
+ *page_mask_out = page_mask;
+ }
+ return *section;
+
+unassigned:
+ return (MemoryRegionSection) { .mr = &io_mem_unassigned };
+}
+
/**
* flatview_do_translate - translate an address in FlatView
*
@@ -489,55 +556,31 @@ static MemoryRegionSection flatview_do_translate(FlatView *fv,
bool is_mmio,
AddressSpace **target_as)
{
- IOMMUTLBEntry iotlb;
MemoryRegionSection *section;
IOMMUMemoryRegion *iommu_mr;
- IOMMUMemoryRegionClass *imrc;
- hwaddr page_mask = (hwaddr)(-1);
hwaddr plen = (hwaddr)(-1);
if (!plen_out) {
plen_out = &plen;
}
- for (;;) {
- section = address_space_translate_internal(
- flatview_to_dispatch(fv), addr, xlat,
- plen_out, is_mmio);
+ section = address_space_translate_internal(
+ flatview_to_dispatch(fv), addr, xlat,
+ plen_out, is_mmio);
- iommu_mr = memory_region_get_iommu(section->mr);
- if (!iommu_mr) {
- break;
- }
- imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
-
- addr = *xlat;
- iotlb = imrc->translate(iommu_mr, addr, is_write ?
- IOMMU_WO : IOMMU_RO);
- if (!(iotlb.perm & (1 << is_write))) {
- goto translate_fail;
- }
-
- addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
- | (addr & iotlb.addr_mask));
- page_mask &= iotlb.addr_mask;
- *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
- fv = address_space_to_flatview(iotlb.target_as);
- *target_as = iotlb.target_as;
+ iommu_mr = memory_region_get_iommu(section->mr);
+ if (unlikely(iommu_mr)) {
+ return address_space_translate_iommu(iommu_mr, xlat,
+ plen_out, page_mask_out,
+ is_write, is_mmio,
+ target_as);
}
-
if (page_mask_out) {
- if (page_mask == (hwaddr)(-1)) {
- /* Not behind an IOMMU, use default page size. */
- page_mask = ~TARGET_PAGE_MASK;
- }
- *page_mask_out = page_mask;
+ /* Not behind an IOMMU, use default page size. */
+ *page_mask_out = ~TARGET_PAGE_MASK;
}
return *section;
-
-translate_fail:
- return (MemoryRegionSection) { .mr = &io_mem_unassigned };
}
/* Called from RCU critical section */
--
2.17.0
On Tue, Apr 17, 2018 at 04:08:01PM +0200, Paolo Bonzini wrote:
[...]
> +static MemoryRegionSection address_space_translate_iommu(IOMMUMemoryRegion *iommu_mr,
> + hwaddr *xlat,
> + hwaddr *plen_out,
> + hwaddr *page_mask_out,
> + bool is_write,
> + bool is_mmio,
> + AddressSpace **target_as)
> +{
> + MemoryRegionSection *section;
> + hwaddr page_mask = (hwaddr)-1;
> +
> + do {
> + hwaddr addr = *xlat;
> + IOMMUMemoryRegionClass *imrc = memory_region_get_iommu_class_nocheck(iommu_mr);
> + IOMMUTLBEntry iotlb = imrc->translate(iommu_mr, addr, is_write ?
> + IOMMU_WO : IOMMU_RO);
> +
> + if (!(iotlb.perm & (1 << is_write))) {
> + goto unassigned;
> + }
> +
> + addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
> + | (addr & iotlb.addr_mask));
> + page_mask &= iotlb.addr_mask;
> + *plen_out = MIN(*plen_out, (addr | iotlb.addr_mask) - addr + 1);
> + *target_as = iotlb.target_as;
> +
> + section = address_space_translate_internal(
> + address_space_to_dispatch(iotlb.target_as), addr, xlat,
> + plen_out, is_mmio);
> + if (!section) {
> + goto unassigned;
(we won't reach here, will we?)
Reviewed-by: Peter Xu <peterx@redhat.com>
--
Peter Xu
----- Original Message -----
> From: "Peter Xu" <peterx@redhat.com>
> To: "Paolo Bonzini" <pbonzini@redhat.com>
> Cc: qemu-devel@nongnu.org
> Sent: Friday, May 4, 2018 6:22:31 AM
> Subject: Re: [Qemu-devel] [PATCH 3/4] exec: extract address_space_translate_iommu, fix page_mask corner case
>
> On Tue, Apr 17, 2018 at 04:08:01PM +0200, Paolo Bonzini wrote:
>
> [...]
>
> > + section = address_space_translate_internal(
> > + address_space_to_dispatch(iotlb.target_as), addr, xlat,
> > + plen_out, is_mmio);
> > + if (!section) {
> > + goto unassigned;
>
> (we won't reach here, will we?)
Good point, I'll fix that.
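
The check can simply go away, since address_space_translate_internal()
never returns NULL; the tail of the loop then becomes roughly:

        /* Always returns a section; io_mem_unassigned backs holes. */
        section = address_space_translate_internal(
                address_space_to_dispatch(iotlb.target_as), addr, xlat,
                plen_out, is_mmio);

        iommu_mr = memory_region_get_iommu(section->mr);
    } while (iommu_mr);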
Paolo
>
> Reviewed-by: Peter Xu <peterx@redhat.com>
>
> --
> Peter Xu
>