[Xen-devel] [PATCH 5/5] iommu / pci: re-implement XEN_DOMCTL_get_device_group...

Paul Durrant posted 5 patches 1 year, 11 months ago

[Xen-devel] [PATCH 5/5] iommu / pci: re-implement XEN_DOMCTL_get_device_group...

Posted by Paul Durrant 1 year, 11 months ago
... using the new iommu_group infrastructure.

Because 'sibling' devices are now members of the same iommu_group,
implement the domctl by looking up the relevant iommu_group and walking
the membership list.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
---
 xen/drivers/passthrough/iommu.c | 65 +++++++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/pci.c   | 47 -----------------------------
 xen/include/xen/iommu.h         |  2 ++
 3 files changed, 67 insertions(+), 47 deletions(-)

diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 11319fbaae..49140c652e 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -729,6 +729,71 @@ int iommu_group_assign(struct pci_dev *pdev)
     return 0;
 }
 
+static struct iommu_group *iommu_group_lookup(uint16_t seg, uint8_t bus,
+                                              uint8_t devfn)
+{
+    unsigned int id = 0;
+    struct iommu_group *grp;
+
+    while ( radix_tree_gang_lookup(&iommu_groups, (void **)&grp, id, 1) )
+    {
+        struct pci_dev *pdev;
+
+        list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
+            if ( pdev->seg == seg && pdev->bus == bus &&
+                 pdev->devfn == devfn )
+                return grp;
+
+        id = grp->id + 1;
+    }
+
+    return NULL;
+}
+
+int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn,
+                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
+{
+    struct iommu_group *grp;
+    struct pci_dev *pdev;
+    int i = 0;
+
+    pcidevs_lock();
+
+    grp = iommu_group_lookup(seg, bus, devfn);
+    if ( !grp )
+    {
+        pcidevs_unlock();
+        return 0;
+    }
+
+    list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
+    {
+        uint32_t sbdf;
+
+        if ( i >= max_sdevs )
+            break;
+
+        if ( pdev->domain != d )
+            continue;
+
+        sbdf = PCI_SBDF3(pdev->seg, pdev->bus, pdev->devfn);
+
+        if ( xsm_get_device_group(XSM_HOOK, sbdf) )
+            continue;
+
+        if ( unlikely(copy_to_guest_offset(buf, i, &sbdf, 1)) )
+        {
+            pcidevs_unlock();
+            return -1;
+        }
+        i++;
+    }
+
+    pcidevs_unlock();
+
+    return i;
+}
+
 #endif /* CONFIG_HAS_PCI */
 
 /*
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 6210409741..68b2883ed6 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1554,53 +1554,6 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
     return ret;
 }
 
-static int iommu_get_device_group(
-    struct domain *d, u16 seg, u8 bus, u8 devfn,
-    XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
-{
-    const struct domain_iommu *hd = dom_iommu(d);
-    struct pci_dev *pdev;
-    int group_id, sdev_id;
-    u32 bdf;
-    int i = 0;
-    const struct iommu_ops *ops = hd->platform_ops;
-
-    if ( !iommu_enabled || !ops || !ops->get_device_group_id )
-        return 0;
-
-    group_id = ops->get_device_group_id(seg, bus, devfn);
-
-    pcidevs_lock();
-    for_each_pdev( d, pdev )
-    {
-        if ( (pdev->seg != seg) ||
-             ((pdev->bus == bus) && (pdev->devfn == devfn)) )
-            continue;
-
-        if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 8) | pdev->devfn) )
-            continue;
-
-        sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
-        if ( (sdev_id == group_id) && (i < max_sdevs) )
-        {
-            bdf = 0;
-            bdf |= (pdev->bus & 0xff) << 16;
-            bdf |= (pdev->devfn & 0xff) << 8;
-
-            if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
-            {
-                pcidevs_unlock();
-                return -1;
-            }
-            i++;
-        }
-    }
-
-    pcidevs_unlock();
-
-    return i;
-}
-
 void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev)
 {
     pcidevs_lock();
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 6d6aa12314..c4e1604bb8 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -319,6 +319,8 @@ extern struct page_list_head iommu_pt_cleanup_list;
 
 void iommu_groups_init(void);
 int iommu_group_assign(struct pci_dev *pdev);
+int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn,
+                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs);
 
 #endif /* CONFIG_HAS_PCI */
 
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 5/5] iommu / pci: re-implement XEN_DOMCTL_get_device_group...

Posted by Roger Pau Monné 1 year, 11 months ago
On Wed, May 08, 2019 at 02:24:03PM +0100, Paul Durrant wrote:
> ... using the new iommu_group infrastructure.
> 
> Because 'sibling' devices are now members of the same iommu_group,
> implement the domctl by looking up the relevant iommu_group and walking
> the membership list.
> 
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> Cc: Jan Beulich <jbeulich@suse.com>
> ---
>  xen/drivers/passthrough/iommu.c | 65 +++++++++++++++++++++++++++++++++++++++++
>  xen/drivers/passthrough/pci.c   | 47 -----------------------------
>  xen/include/xen/iommu.h         |  2 ++
>  3 files changed, 67 insertions(+), 47 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> index 11319fbaae..49140c652e 100644
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -729,6 +729,71 @@ int iommu_group_assign(struct pci_dev *pdev)
>      return 0;
>  }
>  
> +static struct iommu_group *iommu_group_lookup(uint16_t seg, uint8_t bus,
> +                                              uint8_t devfn)

Could you use pci_sbdf_t to pass the SBDF?

> +{
> +    unsigned int id = 0;
> +    struct iommu_group *grp;
> +
> +    while ( radix_tree_gang_lookup(&iommu_groups, (void **)&grp, id, 1) )
> +    {
> +        struct pci_dev *pdev;
> +
> +        list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
> +            if ( pdev->seg == seg && pdev->bus == bus &&
> +                 pdev->devfn == devfn )
> +                return grp;
> +
> +        id = grp->id + 1;
> +    }
> +
> +    return NULL;
> +}
> +
> +int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn,

Using pci_sbdf_t would be better here to pass the SBDF IMO, or
uint<size>_t, or just plain unsigned int.

Also, I wonder about the usefulness of the domain parameter, shouldn't
you do the ownership check somewhere else (if required) and have this
function just check the IOMMU group of a given PCI  device?

(Note you probably want to constify the domain parameter if it needs to
stay).

> +                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
> +{
> +    struct iommu_group *grp;
> +    struct pci_dev *pdev;
> +    int i = 0;

It seems like this should be unsigned int?

> +
> +    pcidevs_lock();
> +
> +    grp = iommu_group_lookup(seg, bus, devfn);
> +    if ( !grp )
> +    {
> +        pcidevs_unlock();
> +        return 0;
> +    }
> +
> +    list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
> +    {
> +        uint32_t sbdf;
> +
> +        if ( i >= max_sdevs )
> +            break;
> +
> +        if ( pdev->domain != d )
> +            continue;
> +
> +        sbdf = PCI_SBDF3(pdev->seg, pdev->bus, pdev->devfn);
> +
> +        if ( xsm_get_device_group(XSM_HOOK, sbdf) )
> +            continue;
> +
> +        if ( unlikely(copy_to_guest_offset(buf, i, &sbdf, 1)) )
> +        {
> +            pcidevs_unlock();
> +            return -1;

-EFAULT?

> +        }
> +        i++;
> +    }
> +
> +    pcidevs_unlock();
> +
> +    return i;
> +}
> +
>  #endif /* CONFIG_HAS_PCI */
>  
>  /*
> diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
> index 6210409741..68b2883ed6 100644
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -1554,53 +1554,6 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
>      return ret;
>  }
>  
> -static int iommu_get_device_group(
> -    struct domain *d, u16 seg, u8 bus, u8 devfn,
> -    XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)

Oh, I see this is code movement and changes to an existing function,
hence my comments above might be stale, or will require to fixup the
callers which might be cumbersome.

Thanks, Roger.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Re: [Xen-devel] [PATCH 5/5] iommu / pci: re-implement XEN_DOMCTL_get_device_group...

Posted by Paul Durrant 1 year, 10 months ago
> -----Original Message-----
> From: Roger Pau Monne
> Sent: 15 May 2019 10:07
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: xen-devel@lists.xenproject.org; Jan Beulich <jbeulich@suse.com>
> Subject: Re: [Xen-devel] [PATCH 5/5] iommu / pci: re-implement XEN_DOMCTL_get_device_group...
> 
> On Wed, May 08, 2019 at 02:24:03PM +0100, Paul Durrant wrote:
> > ... using the new iommu_group infrastructure.
> >
> > Because 'sibling' devices are now members of the same iommu_group,
> > implement the domctl by looking up the relevant iommu_group and walking
> > the membership list.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> > ---
> > Cc: Jan Beulich <jbeulich@suse.com>
> > ---
> >  xen/drivers/passthrough/iommu.c | 65 +++++++++++++++++++++++++++++++++++++++++
> >  xen/drivers/passthrough/pci.c   | 47 -----------------------------
> >  xen/include/xen/iommu.h         |  2 ++
> >  3 files changed, 67 insertions(+), 47 deletions(-)
> >
> > diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
> > index 11319fbaae..49140c652e 100644
> > --- a/xen/drivers/passthrough/iommu.c
> > +++ b/xen/drivers/passthrough/iommu.c
> > @@ -729,6 +729,71 @@ int iommu_group_assign(struct pci_dev *pdev)
> >      return 0;
> >  }
> >
> > +static struct iommu_group *iommu_group_lookup(uint16_t seg, uint8_t bus,
> > +                                              uint8_t devfn)
> 
> Could you use pci_sbdf_t to pass the SBDF?
> 

Probably, I'd not noticed its existence so I'll use it when I can.

> > +{
> > +    unsigned int id = 0;
> > +    struct iommu_group *grp;
> > +
> > +    while ( radix_tree_gang_lookup(&iommu_groups, (void **)&grp, id, 1) )
> > +    {
> > +        struct pci_dev *pdev;
> > +
> > +        list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
> > +            if ( pdev->seg == seg && pdev->bus == bus &&
> > +                 pdev->devfn == devfn )
> > +                return grp;
> > +
> > +        id = grp->id + 1;
> > +    }
> > +
> > +    return NULL;
> > +}
> > +
> > +int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn,
> 
> Using pci_sbdf_t would be better here to pass the SBDF IMO, or
> uint<size>_t, or just plain unsigned int.
> 
> Also, I wonder about the usefulness of the domain parameter, shouldn't
> you do the ownership check somewhere else (if required) and have this
> function just check the IOMMU group of a given PCI  device?
> 
> (Note you probably want to constify the domain parameter if it needs to
> stay).

Yes and no. This is the implementation of an existing domctl so its semantics are baked in. I think I can use pci_sbdf_t but the domain parameter needs to stay.

> 
> > +                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
> > +{
> > +    struct iommu_group *grp;
> > +    struct pci_dev *pdev;
> > +    int i = 0;
> 
> It seems like this should be unsigned int?
> 

Yes, I guess it could be.

> > +
> > +    pcidevs_lock();
> > +
> > +    grp = iommu_group_lookup(seg, bus, devfn);
> > +    if ( !grp )
> > +    {
> > +        pcidevs_unlock();
> > +        return 0;
> > +    }
> > +
> > +    list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
> > +    {
> > +        uint32_t sbdf;
> > +
> > +        if ( i >= max_sdevs )
> > +            break;
> > +
> > +        if ( pdev->domain != d )
> > +            continue;
> > +
> > +        sbdf = PCI_SBDF3(pdev->seg, pdev->bus, pdev->devfn);
> > +
> > +        if ( xsm_get_device_group(XSM_HOOK, sbdf) )
> > +            continue;
> > +
> > +        if ( unlikely(copy_to_guest_offset(buf, i, &sbdf, 1)) )
> > +        {
> > +            pcidevs_unlock();
> > +            return -1;
> 
> -EFAULT?
> 

Yes... then I can get rid of the override of the ret value in the calling code.

  Paul

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel