Kernel workqueues were disabled due to flawed use of kernel VA and SVA
API. Now that we have the support for attaching PASID to the device's
default domain and the ability to reserve global PASIDs from SVA APIs,
we can re-enable the kernel work queues and use them under DMA API.
We also use non-privileged access for in-kernel DMA to be consistent
with the IOMMU settings. Consequently, interrupt for user privilege is
enabled for work completion IRQs.
Link: https://lore.kernel.org/linux-iommu/20210511194726.GP1002214@nvidia.com/
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
---
drivers/dma/idxd/device.c | 30 +++++--------------------
drivers/dma/idxd/init.c | 47 +++++++++++++++++++++++++++++++++++----
drivers/dma/idxd/sysfs.c | 7 ------
3 files changed, 48 insertions(+), 36 deletions(-)
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 125652a8bb29..96faf4d3445e 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
}
}
-static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
-{
- struct idxd_device *idxd = wq->idxd;
- union wqcfg wqcfg;
- unsigned int offset;
-
- offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
- spin_lock(&idxd->dev_lock);
- wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
- wqcfg.priv = priv;
- wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
- iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
- spin_unlock(&idxd->dev_lock);
-}
-
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
struct idxd_device *idxd = wq->idxd;
@@ -1324,15 +1309,14 @@ int drv_enable_wq(struct idxd_wq *wq)
}
/*
- * In the event that the WQ is configurable for pasid and priv bits.
- * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
- * However, for non-kernel wq, the driver should only set the pasid_en bit for
- * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+ * In the event that the WQ is configurable for pasid, the driver
+ * should setup the pasid, pasid_en bit. This is true for both kernel
+ * and user shared workqueues. There is no need to setup priv bit in
+ * that in-kernel DMA will also do user privileged requests.
+ * A dedicated wq that is not 'kernel' type will configure pasid and
* pasid_en later on so there is no need to setup.
*/
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- int priv = 0;
-
if (wq_pasid_enabled(wq)) {
if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
@@ -1340,10 +1324,6 @@ int drv_enable_wq(struct idxd_wq *wq)
__idxd_wq_set_pasid_locked(wq, pasid);
}
}
-
- if (is_idxd_wq_kernel(wq))
- priv = 1;
- __idxd_wq_set_priv_locked(wq, priv);
}
rc = 0;
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index f30eef701970..dadc908318aa 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -501,14 +501,52 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
- return -EOPNOTSUPP;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iommu_domain *domain;
+ union gencfg_reg gencfg;
+ ioasid_t pasid;
+ int ret;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
+ return -EPERM;
+
+ pasid = iommu_sva_reserve_pasid(1, dev->iommu->max_pasids);
+ if (pasid == IOMMU_PASID_INVALID)
+ return -ENOSPC;
+
+ ret = iommu_attach_device_pasid(domain, dev, pasid);
+ if (ret) {
+ dev_err(dev, "failed to attach device pasid %d, domain type %d",
+ pasid, domain->type);
+ iommu_sva_unreserve_pasid(pasid);
+ return ret;
+ }
+
+ /* Since we set user privilege for kernel DMA, enable completion IRQ */
+ gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ gencfg.user_int_en = 1;
+ iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+ idxd->pasid = pasid;
+
+ return ret;
}
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
+ return;
- iommu_sva_unbind_device(idxd->sva);
+ iommu_detach_device_pasid(domain, dev, idxd->pasid);
+ iommu_sva_unreserve_pasid(idxd->pasid);
idxd->sva = NULL;
+ idxd->pasid = IOMMU_PASID_INVALID;
}
static int idxd_probe(struct idxd_device *idxd)
@@ -530,8 +568,9 @@ static int idxd_probe(struct idxd_device *idxd)
} else {
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- if (idxd_enable_system_pasid(idxd))
- dev_warn(dev, "No in-kernel DMA with PASID.\n");
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
else
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3229dfc78650..09f5c3f2a992 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -944,13 +944,6 @@ static ssize_t wq_name_store(struct device *dev,
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
return -EINVAL;
- /*
- * This is temporarily placed here until we have SVM support for
- * dmaengine.
- */
- if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
- return -EOPNOTSUPP;
-
input = kstrndup(buf, count, GFP_KERNEL);
if (!input)
return -ENOMEM;
--
2.25.1
On 3/2/23 8:59 AM, Jacob Pan wrote:
> diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
> index f30eef701970..dadc908318aa 100644
> --- a/drivers/dma/idxd/init.c
> +++ b/drivers/dma/idxd/init.c
> @@ -501,14 +501,52 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
>
> static int idxd_enable_system_pasid(struct idxd_device *idxd)
> {
> - return -EOPNOTSUPP;
> + struct pci_dev *pdev = idxd->pdev;
> + struct device *dev = &pdev->dev;
> + struct iommu_domain *domain;
> + union gencfg_reg gencfg;
> + ioasid_t pasid;
> + int ret;
> +
> + domain = iommu_get_domain_for_dev(dev);
> + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> + return -EPERM;
The idxd driver has claimed the DMA ownership of this device. Unless the
idxd driver itself attached another domain, iommu_get_domain_for_dev()
should never return a blocking domain.
"domain == NULL" happens when CONFIG_IOMMU_API is not set.
Furthermore, iommu_get_domain_for_dev() doesn't hold any refcount from
the domain, so in theory it's not safe here because it could cause a
use-after-release case.
I would say iommu_get_dma_domain() or something similar is more suitable
for use here. It directly returns the device's default domain and the
iommu core guarantees that the default domain will always be valid during the
life cycle of any device driver.
Best regards,
baolu
Hi Baolu,
On Fri, 3 Mar 2023 09:19:48 +0800, Baolu Lu <baolu.lu@linux.intel.com>
wrote:
> On 3/2/23 8:59 AM, Jacob Pan wrote:
> > diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
> > index f30eef701970..dadc908318aa 100644
> > --- a/drivers/dma/idxd/init.c
> > +++ b/drivers/dma/idxd/init.c
> > @@ -501,14 +501,52 @@ static struct idxd_device *idxd_alloc(struct
> > pci_dev *pdev, struct idxd_driver_d
> > static int idxd_enable_system_pasid(struct idxd_device *idxd)
> > {
> > - return -EOPNOTSUPP;
> > + struct pci_dev *pdev = idxd->pdev;
> > + struct device *dev = &pdev->dev;
> > + struct iommu_domain *domain;
> > + union gencfg_reg gencfg;
> > + ioasid_t pasid;
> > + int ret;
> > +
> > + domain = iommu_get_domain_for_dev(dev);
> > + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> > + return -EPERM;
>
> The idxd driver has claimed the DMA ownership of this device. Unless the
> idxd driver itself attached another domain, iommu_get_domain_for_dev()
> should never return a blocking domain.
>
> "domain == NULL" happens when CONFIG_IOMMU_API is not set.
>
> Furthermore, iommu_get_domain_for_dev() doesn't hold any refcount from
> the domain, so in theory it's not safe here because it possibly causes
> use-after-release case.
>
> I would say iommu_get_dma_domain() or something similar is more suitable
> for use here. It directly returns the device's default domain and the
> iommu core guarantees that default domain will always valid during the
> life cycle of any device driver.
>
will do, same as Jason's comments.
Thanks,
Jacob
> From: Jacob Pan <jacob.jun.pan@linux.intel.com>
> Sent: Thursday, March 2, 2023 9:00 AM
>
> static int idxd_enable_system_pasid(struct idxd_device *idxd)
> {
> - return -EOPNOTSUPP;
> + struct pci_dev *pdev = idxd->pdev;
> + struct device *dev = &pdev->dev;
> + struct iommu_domain *domain;
> + union gencfg_reg gencfg;
> + ioasid_t pasid;
> + int ret;
> +
> + domain = iommu_get_domain_for_dev(dev);
> + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> + return -EPERM;
what about UNMANAGED?
> +
> + pasid = iommu_sva_reserve_pasid(1, dev->iommu->max_pasids);
> + if (pasid == IOMMU_PASID_INVALID)
> + return -ENOSPC;
as commented in last patch we can just pass a device pointer to a
general allocation interface.
> +
> + ret = iommu_attach_device_pasid(domain, dev, pasid);
> + if (ret) {
> + dev_err(dev, "failed to attach device pasid %d, domain
> type %d",
> + pasid, domain->type);
> + iommu_sva_unreserve_pasid(pasid);
> + return ret;
> + }
> +
> + /* Since we set user privilege for kernel DMA, enable completion IRQ
> */
> + gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
> + gencfg.user_int_en = 1;
> + iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
> + idxd->pasid = pasid;
Why does user privilege requires a completion interrupt?
Or instead it's more due to doing kernel DMA itself then we certainly
don't want to poll in the kernel?
Hi Kevin,
On Thu, 2 Mar 2023 09:47:00 +0000, "Tian, Kevin" <kevin.tian@intel.com>
wrote:
> > From: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > Sent: Thursday, March 2, 2023 9:00 AM
> >
> > static int idxd_enable_system_pasid(struct idxd_device *idxd)
> > {
> > - return -EOPNOTSUPP;
> > + struct pci_dev *pdev = idxd->pdev;
> > + struct device *dev = &pdev->dev;
> > + struct iommu_domain *domain;
> > + union gencfg_reg gencfg;
> > + ioasid_t pasid;
> > + int ret;
> > +
> > + domain = iommu_get_domain_for_dev(dev);
> > + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> > + return -EPERM;
>
> what about UNMANAGED?
will fix this by getting the dma domain.
>
> > +
> > + pasid = iommu_sva_reserve_pasid(1, dev->iommu->max_pasids);
> > + if (pasid == IOMMU_PASID_INVALID)
> > + return -ENOSPC;
>
> as commented in last patch we can just pass a device pointer to a
> general allocation interface.
will do
>
> > +
> > + ret = iommu_attach_device_pasid(domain, dev, pasid);
> > + if (ret) {
> > + dev_err(dev, "failed to attach device pasid %d, domain
> > type %d",
> > + pasid, domain->type);
> > + iommu_sva_unreserve_pasid(pasid);
> > + return ret;
> > + }
> > +
> > + /* Since we set user privilege for kernel DMA, enable
> > completion IRQ */
> > + gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
> > + gencfg.user_int_en = 1;
> > + iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
> > + idxd->pasid = pasid;
>
> Why does user privilege requires a completion interrupt?
>
> Or instead it's more due to doing kernel DMA itself then we certainly
> don't want to poll in the kernel?
yes, kernel wq does not support polling, therefore it needs interrupts.
Without user_int_en bit set, there would be no interrupts if we use user
privilege for kernel wq.
Thanks,
Jacob
On Thu, Mar 02, 2023 at 09:47:00AM +0000, Tian, Kevin wrote:
> > From: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > Sent: Thursday, March 2, 2023 9:00 AM
> >
> > static int idxd_enable_system_pasid(struct idxd_device *idxd)
> > {
> > - return -EOPNOTSUPP;
> > + struct pci_dev *pdev = idxd->pdev;
> > + struct device *dev = &pdev->dev;
> > + struct iommu_domain *domain;
> > + union gencfg_reg gencfg;
> > + ioasid_t pasid;
> > + int ret;
> > +
> > + domain = iommu_get_domain_for_dev(dev);
> > + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> > + return -EPERM;
>
> what about UNMANAGED?
Why are we checking this anyhow?
Get the domain the DMA API is using and feed it to
iommu_attach_device_pasid(). If the driver can't mirror the DMA API's
domain onto the PASID then it will just fail the attach. A domain
cannot even be NULL on x86.
Jason
Hi Jason,
On Thu, 2 Mar 2023 08:57:48 -0400, Jason Gunthorpe <jgg@nvidia.com> wrote:
> On Thu, Mar 02, 2023 at 09:47:00AM +0000, Tian, Kevin wrote:
> > > From: Jacob Pan <jacob.jun.pan@linux.intel.com>
> > > Sent: Thursday, March 2, 2023 9:00 AM
> > >
> > > static int idxd_enable_system_pasid(struct idxd_device *idxd)
> > > {
> > > - return -EOPNOTSUPP;
> > > + struct pci_dev *pdev = idxd->pdev;
> > > + struct device *dev = &pdev->dev;
> > > + struct iommu_domain *domain;
> > > + union gencfg_reg gencfg;
> > > + ioasid_t pasid;
> > > + int ret;
> > > +
> > > + domain = iommu_get_domain_for_dev(dev);
> > > + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> > > + return -EPERM;
> >
> > what about UNMANAGED?
>
> Why are we checking this anyhow?
>
> Get the domain the DMA API is using and feed it to
> iommu_attach_device_pasid(). If the driver can't mirror the DMA API's
> domain onto the PASID then it will just fail the attach. A domain
> cannot even be NULL on x86.
makes sense,
Thanks,
Jacob
On 3/1/23 5:59 PM, Jacob Pan wrote:
> Kernel workqueues were disabled due to flawed use of kernel VA and SVA
> API. Now That we have the support for attaching PASID to the device's
> default domain and the ability to reserve global PASIDs from SVA APIs,
> we can re-enable the kernel work queues and use them under DMA API.
>
> We also use non-privileged access for in-kernel DMA to be consistent
> with the IOMMU settings. Consequently, interrupt for user privilege is
> enabled for work completion IRQs.
>
> Link:https://lore.kernel.org/linux-iommu/20210511194726.GP1002214@nvidia.com/
> Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
> ---
> drivers/dma/idxd/device.c | 30 +++++--------------------
> drivers/dma/idxd/init.c | 47 +++++++++++++++++++++++++++++++++++----
> drivers/dma/idxd/sysfs.c | 7 ------
> 3 files changed, 48 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
> index 125652a8bb29..96faf4d3445e 100644
> --- a/drivers/dma/idxd/device.c
> +++ b/drivers/dma/idxd/device.c
> @@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
> }
> }
>
> -static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
> -{
> - struct idxd_device *idxd = wq->idxd;
> - union wqcfg wqcfg;
> - unsigned int offset;
> -
> - offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
> - spin_lock(&idxd->dev_lock);
> - wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
> - wqcfg.priv = priv;
> - wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
> - iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
> - spin_unlock(&idxd->dev_lock);
> -}
> -
> static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
> {
> struct idxd_device *idxd = wq->idxd;
> @@ -1324,15 +1309,14 @@ int drv_enable_wq(struct idxd_wq *wq)
> }
>
> /*
> - * In the event that the WQ is configurable for pasid and priv bits.
> - * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
> - * However, for non-kernel wq, the driver should only set the pasid_en bit for
> - * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
> + * In the event that the WQ is configurable for pasid, the driver
> + * should setup the pasid, pasid_en bit. This is true for both kernel
> + * and user shared workqueues. There is no need to setup priv bit in
> + * that in-kernel DMA will also do user privileged requests.
> + * A dedicated wq that is not 'kernel' type will configure pasid and
> * pasid_en later on so there is no need to setup.
> */
> if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
> - int priv = 0;
> -
> if (wq_pasid_enabled(wq)) {
> if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
> u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
> @@ -1340,10 +1324,6 @@ int drv_enable_wq(struct idxd_wq *wq)
> __idxd_wq_set_pasid_locked(wq, pasid);
> }
> }
> -
> - if (is_idxd_wq_kernel(wq))
> - priv = 1;
> - __idxd_wq_set_priv_locked(wq, priv);
> }
>
> rc = 0;
> diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
> index f30eef701970..dadc908318aa 100644
> --- a/drivers/dma/idxd/init.c
> +++ b/drivers/dma/idxd/init.c
> @@ -501,14 +501,52 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
>
> static int idxd_enable_system_pasid(struct idxd_device *idxd)
> {
> - return -EOPNOTSUPP;
> + struct pci_dev *pdev = idxd->pdev;
> + struct device *dev = &pdev->dev;
> + struct iommu_domain *domain;
> + union gencfg_reg gencfg;
> + ioasid_t pasid;
> + int ret;
> +
> + domain = iommu_get_domain_for_dev(dev);
> + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> + return -EPERM;
> +
> + pasid = iommu_sva_reserve_pasid(1, dev->iommu->max_pasids);
> + if (pasid == IOMMU_PASID_INVALID)
> + return -ENOSPC;
> +
> + ret = iommu_attach_device_pasid(domain, dev, pasid);
> + if (ret) {
> + dev_err(dev, "failed to attach device pasid %d, domain type %d",
> + pasid, domain->type);
> + iommu_sva_unreserve_pasid(pasid);
> + return ret;
> + }
> +
> + /* Since we set user privilege for kernel DMA, enable completion IRQ */
> + gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
> + gencfg.user_int_en = 1;
> + iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
> + idxd->pasid = pasid;
> +
> + return ret;
> }
>
> static void idxd_disable_system_pasid(struct idxd_device *idxd)
> {
> + struct pci_dev *pdev = idxd->pdev;
> + struct device *dev = &pdev->dev;
> + struct iommu_domain *domain;
> +
> + domain = iommu_get_domain_for_dev(dev);
> + if (!domain || domain->type == IOMMU_DOMAIN_BLOCKED)
> + return;
>
> - iommu_sva_unbind_device(idxd->sva);
> + iommu_detach_device_pasid(domain, dev, idxd->pasid);
> + iommu_sva_unreserve_pasid(idxd->pasid);
> idxd->sva = NULL;
> + idxd->pasid = IOMMU_PASID_INVALID;
> }
>
> static int idxd_probe(struct idxd_device *idxd)
> @@ -530,8 +568,9 @@ static int idxd_probe(struct idxd_device *idxd)
> } else {
> set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
>
> - if (idxd_enable_system_pasid(idxd))
> - dev_warn(dev, "No in-kernel DMA with PASID.\n");
> + rc = idxd_enable_system_pasid(idxd);
> + if (rc)
> + dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
> else
> set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
> }
> diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
> index 3229dfc78650..09f5c3f2a992 100644
> --- a/drivers/dma/idxd/sysfs.c
> +++ b/drivers/dma/idxd/sysfs.c
> @@ -944,13 +944,6 @@ static ssize_t wq_name_store(struct device *dev,
> if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
> return -EINVAL;
>
> - /*
> - * This is temporarily placed here until we have SVM support for
> - * dmaengine.
> - */
> - if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
> - return -EOPNOTSUPP;
> -
> input = kstrndup(buf, count, GFP_KERNEL);
> if (!input)
> return -ENOMEM;
© 2016 - 2026 Red Hat, Inc.