The MPM hardware is accessible to us from the ARM CPUs through a shared
memory region (RPM MSG RAM) that's also concurrently accessed by other
kinds of cores on the system (like modem, ADSP etc.). Modeling this
relation in a (somewhat) sane manner in the device tree basically
requires us to either present the MPM as a child of said memory region
(which makes little sense, as a mapped memory carveout is not a bus),
define nodes which bleed their register spaces into one another, or
pass their slice of the MSG RAM through some kind of a property.
Go with the third option and add a way to map a region passed through
the "qcom,rpm-msg-ram" property as our register space.
The current way of using 'reg' is preserved for ABI reasons.
Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
---
drivers/irqchip/irq-qcom-mpm.c | 30 +++++++++++++++++++++++++-----
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index d30614661eea..6fe59f4deef4 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -14,6 +14,7 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
struct device *dev = &pdev->dev;
struct irq_domain *parent_domain;
struct generic_pm_domain *genpd;
+ struct device_node *msgram_np;
struct qcom_mpm_priv *priv;
unsigned int pin_cnt;
+ struct resource res;
int i, irq;
int ret;
@@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
raw_spin_lock_init(&priv->lock);
- priv->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ /* If we have a handle to an RPM message ram partition, use it. */
+ msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
+ if (msgram_np) {
+ ret = of_address_to_resource(msgram_np, 0, &res);
+ /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
+ priv->base = ioremap(res.start, resource_size(&res));
+ of_node_put(msgram_np);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+ } else {
+ /* Otherwise, fall back to simple MMIO. */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+ }
for (i = 0; i < priv->reg_stride; i++) {
qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
@@ -387,8 +402,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto unmap_base;
+ }
genpd = &priv->genpd;
genpd->flags = GENPD_FLAG_IRQ_SAFE;
@@ -451,6 +468,9 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
mbox_free_channel(priv->mbox_chan);
remove_genpd:
pm_genpd_remove(genpd);
+unmap_base:
+ if (res.start)
+ iounmap(priv->base);
return ret;
}
--
2.40.0
On Tue, Mar 28, 2023 at 12:02:53PM +0200, Konrad Dybcio wrote:
> The MPM hardware is accessible to us from the ARM CPUs through a shared
> memory region (RPM MSG RAM) that's also concurrently accessed by other
> kinds of cores on the system (like modem, ADSP etc.). Modeling this
> relation in a (somewhat) sane manner in the device tree basically
> requires us to either present the MPM as a child of said memory region
> (which makes little sense, as a mapped memory carveout is not a bus),
> define nodes which bleed their register spaces into one another, or
> passing their slice of the MSG RAM through some kind of a property.
>
> Go with the third option and add a way to map a region passed through
> the "qcom,rpm-msg-ram" property as our register space.
>
> The current way of using 'reg' is preserved for ABI reasons.
>
> Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
> ---
> drivers/irqchip/irq-qcom-mpm.c | 30 +++++++++++++++++++++++++-----
> 1 file changed, 25 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
> index d30614661eea..6fe59f4deef4 100644
> --- a/drivers/irqchip/irq-qcom-mpm.c
> +++ b/drivers/irqchip/irq-qcom-mpm.c
> @@ -14,6 +14,7 @@
> #include <linux/mailbox_client.h>
> #include <linux/module.h>
> #include <linux/of.h>
> +#include <linux/of_address.h>
> #include <linux/of_device.h>
> #include <linux/platform_device.h>
> #include <linux/pm_domain.h>
> @@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
> struct device *dev = &pdev->dev;
> struct irq_domain *parent_domain;
> struct generic_pm_domain *genpd;
> + struct device_node *msgram_np;
> struct qcom_mpm_priv *priv;
> unsigned int pin_cnt;
> + struct resource res;
> int i, irq;
> int ret;
>
> @@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>
> raw_spin_lock_init(&priv->lock);
>
> - priv->base = devm_platform_ioremap_resource(pdev, 0);
> - if (IS_ERR(priv->base))
> - return PTR_ERR(priv->base);
> + /* If we have a handle to an RPM message ram partition, use it. */
> + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
> + if (msgram_np) {
> + ret = of_address_to_resource(msgram_np, 0, &res);
> + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
> + priv->base = ioremap(res.start, resource_size(&res));
Are you suggesting that other cores/drivers will also need to access
the mpm slice below?
apss_mpm: sram@1b8 {
reg = <0x1b8 0x48>;
};
Shawn
> + of_node_put(msgram_np);
> + if (IS_ERR(priv->base))
> + return PTR_ERR(priv->base);
> + } else {
> + /* Otherwise, fall back to simple MMIO. */
> + priv->base = devm_platform_ioremap_resource(pdev, 0);
> + if (IS_ERR(priv->base))
> + return PTR_ERR(priv->base);
> + }
>
> for (i = 0; i < priv->reg_stride; i++) {
> qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
> @@ -387,8 +402,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
> }
>
> irq = platform_get_irq(pdev, 0);
> - if (irq < 0)
> - return irq;
> + if (irq < 0) {
> + ret = irq;
> + goto unmap_base;
> + }
>
> genpd = &priv->genpd;
> genpd->flags = GENPD_FLAG_IRQ_SAFE;
> @@ -451,6 +468,9 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
> mbox_free_channel(priv->mbox_chan);
> remove_genpd:
> pm_genpd_remove(genpd);
> +unmap_base:
> + if (res.start)
> + iounmap(priv->base);
> return ret;
> }
>
>
> --
> 2.40.0
>
On 29.03.2023 05:49, Shawn Guo wrote:
> On Tue, Mar 28, 2023 at 12:02:53PM +0200, Konrad Dybcio wrote:
>> The MPM hardware is accessible to us from the ARM CPUs through a shared
>> memory region (RPM MSG RAM) that's also concurrently accessed by other
>> kinds of cores on the system (like modem, ADSP etc.). Modeling this
>> relation in a (somewhat) sane manner in the device tree basically
>> requires us to either present the MPM as a child of said memory region
>> (which makes little sense, as a mapped memory carveout is not a bus),
>> define nodes which bleed their register spaces into one another, or
>> passing their slice of the MSG RAM through some kind of a property.
>>
>> Go with the third option and add a way to map a region passed through
>> the "qcom,rpm-msg-ram" property as our register space.
>>
>> The current way of using 'reg' is preserved for ABI reasons.
>>
>> Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
>> ---
>> drivers/irqchip/irq-qcom-mpm.c | 30 +++++++++++++++++++++++++-----
>> 1 file changed, 25 insertions(+), 5 deletions(-)
>>
>> diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
>> index d30614661eea..6fe59f4deef4 100644
>> --- a/drivers/irqchip/irq-qcom-mpm.c
>> +++ b/drivers/irqchip/irq-qcom-mpm.c
>> @@ -14,6 +14,7 @@
>> #include <linux/mailbox_client.h>
>> #include <linux/module.h>
>> #include <linux/of.h>
>> +#include <linux/of_address.h>
>> #include <linux/of_device.h>
>> #include <linux/platform_device.h>
>> #include <linux/pm_domain.h>
>> @@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>> struct device *dev = &pdev->dev;
>> struct irq_domain *parent_domain;
>> struct generic_pm_domain *genpd;
>> + struct device_node *msgram_np;
>> struct qcom_mpm_priv *priv;
>> unsigned int pin_cnt;
>> + struct resource res;
>> int i, irq;
>> int ret;
>>
>> @@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>>
>> raw_spin_lock_init(&priv->lock);
>>
>> - priv->base = devm_platform_ioremap_resource(pdev, 0);
>> - if (IS_ERR(priv->base))
>> - return PTR_ERR(priv->base);
>> + /* If we have a handle to an RPM message ram partition, use it. */
>> + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
>> + if (msgram_np) {
>> + ret = of_address_to_resource(msgram_np, 0, &res);
>> + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
>> + priv->base = ioremap(res.start, resource_size(&res));
>
> Are you suggesting that other cores/drivers will also need to access
> the mpm slice below?
>
> apss_mpm: sram@1b8 {
> reg = <0x1b8 0x48>;
> };
Yes, the RPM M3 core. Other slices may be accessed
by any core at any time.
Konrad
>
> Shawn
>
>> + of_node_put(msgram_np);
>> + if (IS_ERR(priv->base))
>> + return PTR_ERR(priv->base);
>> + } else {
>> + /* Otherwise, fall back to simple MMIO. */
>> + priv->base = devm_platform_ioremap_resource(pdev, 0);
>> + if (IS_ERR(priv->base))
>> + return PTR_ERR(priv->base);
>> + }
>>
>> for (i = 0; i < priv->reg_stride; i++) {
>> qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
>> @@ -387,8 +402,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>> }
>>
>> irq = platform_get_irq(pdev, 0);
>> - if (irq < 0)
>> - return irq;
>> + if (irq < 0) {
>> + ret = irq;
>> + goto unmap_base;
>> + }
>>
>> genpd = &priv->genpd;
>> genpd->flags = GENPD_FLAG_IRQ_SAFE;
>> @@ -451,6 +468,9 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>> mbox_free_channel(priv->mbox_chan);
>> remove_genpd:
>> pm_genpd_remove(genpd);
>> +unmap_base:
>> + if (res.start)
>> + iounmap(priv->base);
>> return ret;
>> }
>>
>>
>> --
>> 2.40.0
>>
On Wed, Mar 29, 2023 at 01:06:11PM +0200, Konrad Dybcio wrote:
>
>
> On 29.03.2023 05:49, Shawn Guo wrote:
> > On Tue, Mar 28, 2023 at 12:02:53PM +0200, Konrad Dybcio wrote:
> >> The MPM hardware is accessible to us from the ARM CPUs through a shared
> >> memory region (RPM MSG RAM) that's also concurrently accessed by other
> >> kinds of cores on the system (like modem, ADSP etc.). Modeling this
> >> relation in a (somewhat) sane manner in the device tree basically
> >> requires us to either present the MPM as a child of said memory region
> >> (which makes little sense, as a mapped memory carveout is not a bus),
> >> define nodes which bleed their register spaces into one another, or
> >> passing their slice of the MSG RAM through some kind of a property.
> >>
> >> Go with the third option and add a way to map a region passed through
> >> the "qcom,rpm-msg-ram" property as our register space.
> >>
> >> The current way of using 'reg' is preserved for ABI reasons.
> >>
> >> Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
> >> ---
> >> drivers/irqchip/irq-qcom-mpm.c | 30 +++++++++++++++++++++++++-----
> >> 1 file changed, 25 insertions(+), 5 deletions(-)
> >>
> >> diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
> >> index d30614661eea..6fe59f4deef4 100644
> >> --- a/drivers/irqchip/irq-qcom-mpm.c
> >> +++ b/drivers/irqchip/irq-qcom-mpm.c
> >> @@ -14,6 +14,7 @@
> >> #include <linux/mailbox_client.h>
> >> #include <linux/module.h>
> >> #include <linux/of.h>
> >> +#include <linux/of_address.h>
> >> #include <linux/of_device.h>
> >> #include <linux/platform_device.h>
> >> #include <linux/pm_domain.h>
> >> @@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
> >> struct device *dev = &pdev->dev;
> >> struct irq_domain *parent_domain;
> >> struct generic_pm_domain *genpd;
> >> + struct device_node *msgram_np;
> >> struct qcom_mpm_priv *priv;
> >> unsigned int pin_cnt;
> >> + struct resource res;
> >> int i, irq;
> >> int ret;
> >>
> >> @@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
> >>
> >> raw_spin_lock_init(&priv->lock);
> >>
> >> - priv->base = devm_platform_ioremap_resource(pdev, 0);
> >> - if (IS_ERR(priv->base))
> >> - return PTR_ERR(priv->base);
> >> + /* If we have a handle to an RPM message ram partition, use it. */
> >> + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
> >> + if (msgram_np) {
> >> + ret = of_address_to_resource(msgram_np, 0, &res);
> >> + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
> >> + priv->base = ioremap(res.start, resource_size(&res));
> >
> > Are you suggesting that other cores/drivers will also need to access
> > the mpm slice below?
> >
> > apss_mpm: sram@1b8 {
> > reg = <0x1b8 0x48>;
> > };
> Yes, the RPM M3 core. Other slices may be accessed
> by any core at any time.
Hmm, let me reword my question. Other than irq-qcom-mpm, is there any
other Linux drivers that also need to request this slice region?
Otherwise, I do not understand why devm_ioremap_resource() cannot be
used.
Shawn
On 29.03.2023 15:28, Shawn Guo wrote:
> On Wed, Mar 29, 2023 at 01:06:11PM +0200, Konrad Dybcio wrote:
>>
>>
>> On 29.03.2023 05:49, Shawn Guo wrote:
>>> On Tue, Mar 28, 2023 at 12:02:53PM +0200, Konrad Dybcio wrote:
>>>> The MPM hardware is accessible to us from the ARM CPUs through a shared
>>>> memory region (RPM MSG RAM) that's also concurrently accessed by other
>>>> kinds of cores on the system (like modem, ADSP etc.). Modeling this
>>>> relation in a (somewhat) sane manner in the device tree basically
>>>> requires us to either present the MPM as a child of said memory region
>>>> (which makes little sense, as a mapped memory carveout is not a bus),
>>>> define nodes which bleed their register spaces into one another, or
>>>> passing their slice of the MSG RAM through some kind of a property.
>>>>
>>>> Go with the third option and add a way to map a region passed through
>>>> the "qcom,rpm-msg-ram" property as our register space.
>>>>
>>>> The current way of using 'reg' is preserved for ABI reasons.
>>>>
>>>> Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
>>>> ---
>>>> drivers/irqchip/irq-qcom-mpm.c | 30 +++++++++++++++++++++++++-----
>>>> 1 file changed, 25 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
>>>> index d30614661eea..6fe59f4deef4 100644
>>>> --- a/drivers/irqchip/irq-qcom-mpm.c
>>>> +++ b/drivers/irqchip/irq-qcom-mpm.c
>>>> @@ -14,6 +14,7 @@
>>>> #include <linux/mailbox_client.h>
>>>> #include <linux/module.h>
>>>> #include <linux/of.h>
>>>> +#include <linux/of_address.h>
>>>> #include <linux/of_device.h>
>>>> #include <linux/platform_device.h>
>>>> #include <linux/pm_domain.h>
>>>> @@ -322,8 +323,10 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>>>> struct device *dev = &pdev->dev;
>>>> struct irq_domain *parent_domain;
>>>> struct generic_pm_domain *genpd;
>>>> + struct device_node *msgram_np;
>>>> struct qcom_mpm_priv *priv;
>>>> unsigned int pin_cnt;
>>>> + struct resource res;
>>>> int i, irq;
>>>> int ret;
>>>>
>>>> @@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>>>>
>>>> raw_spin_lock_init(&priv->lock);
>>>>
>>>> - priv->base = devm_platform_ioremap_resource(pdev, 0);
>>>> - if (IS_ERR(priv->base))
>>>> - return PTR_ERR(priv->base);
>>>> + /* If we have a handle to an RPM message ram partition, use it. */
>>>> + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
>>>> + if (msgram_np) {
>>>> + ret = of_address_to_resource(msgram_np, 0, &res);
>>>> + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
>>>> + priv->base = ioremap(res.start, resource_size(&res));
>>>
>>> Are you suggesting that other cores/drivers will also need to access
>>> the mpm slice below?
>>>
>>> apss_mpm: sram@1b8 {
>>> reg = <0x1b8 0x48>;
>>> };
>> Yes, the RPM M3 core. Other slices may be accessed
>> by any core at any time.
>
> Hmm, let me reword my question. Other than irq-qcom-mpm, is there any
> other Linux drivers that also need to request this slice region?
No.
> Otherwise, I do not understand why devm_ioremap_resource() cannot be
> used.
drivers/rpmsg/qcom_glink_rpm.c calls devm_ioremap on the entire
RPM MSG RAM.
Konrad
>
> Shawn
On Wed, Mar 29, 2023 at 03:30:12PM +0200, Konrad Dybcio wrote:
> > Otherwise, I do not understand why devm_ioremap_resource() cannot be
> > used.
> drivers/rpmsg/qcom_glink_rpm.c calls devm_ioremap on the entire
> RPM MSG RAM.

qcom_glink_rpm driver remaps the entire RPM MSG RAM, but it doesn't seem
to request any region. So MPM can still call devm_ioremap_resource() on
its slice, no?

Shawn
On 30.03.2023 03:50, Shawn Guo wrote:
> On Wed, Mar 29, 2023 at 03:30:12PM +0200, Konrad Dybcio wrote:
>>> Otherwise, I do not understand why devm_ioremap_resource() cannot be
>>> used.
>> drivers/rpmsg/qcom_glink_rpm.c calls devm_ioremap on the entire
>> RPM MSG RAM.
>
> qcom_glink_rpm driver remaps the entire RPM MSG RAM, but it doesn't seem
> to request any region. So MPM can still call devm_ioremap_resource() on
> its slice, no?
FWIW, I did get a 'can't request resource error'.

Konrad

>
> Shawn
On Wed, Mar 29, 2023 at 03:30:12PM +0200, Konrad Dybcio wrote:
> >>>> @@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
> >>>>
> >>>> raw_spin_lock_init(&priv->lock);
> >>>>
> >>>> - priv->base = devm_platform_ioremap_resource(pdev, 0);
> >>>> - if (IS_ERR(priv->base))
> >>>> - return PTR_ERR(priv->base);
> >>>> + /* If we have a handle to an RPM message ram partition, use it. */
> >>>> + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
> >>>> + if (msgram_np) {
> >>>> + ret = of_address_to_resource(msgram_np, 0, &res);
> >>>> + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
> >>>> + priv->base = ioremap(res.start, resource_size(&res));
> >>>
> >>> Are you suggesting that other cores/drivers will also need to access
> >>> the mpm slice below?
> >>>
> >>> apss_mpm: sram@1b8 {
> >>> reg = <0x1b8 0x48>;
> >>> };
> >> Yes, the RPM M3 core. Other slices may be accessed
> >> by any core at any time.
> >
> > Hmm, let me reword my question. Other than irq-qcom-mpm, is there any
> > other Linux drivers that also need to request this slice region?
> No.
>
> > Otherwise, I do not understand why devm_ioremap_resource() cannot be
> > used.
> drivers/rpmsg/qcom_glink_rpm.c calls devm_ioremap on the entire
> RPM MSG RAM.
Can we use devm_ioremap() too instead of ioremap() here?
Shawn
On 30.03.2023 03:34, Shawn Guo wrote:
> On Wed, Mar 29, 2023 at 03:30:12PM +0200, Konrad Dybcio wrote:
>>>>>> @@ -374,9 +377,21 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
>>>>>>
>>>>>> raw_spin_lock_init(&priv->lock);
>>>>>>
>>>>>> - priv->base = devm_platform_ioremap_resource(pdev, 0);
>>>>>> - if (IS_ERR(priv->base))
>>>>>> - return PTR_ERR(priv->base);
>>>>>> + /* If we have a handle to an RPM message ram partition, use it. */
>>>>>> + msgram_np = of_parse_phandle(np, "qcom,rpm-msg-ram", 0);
>>>>>> + if (msgram_np) {
>>>>>> + ret = of_address_to_resource(msgram_np, 0, &res);
>>>>>> + /* Don't use devm_ioremap_resource, as we're accessing a shared region. */
>>>>>> + priv->base = ioremap(res.start, resource_size(&res));
>>>>>
>>>>> Are you suggesting that other cores/drivers will also need to access
>>>>> the mpm slice below?
>>>>>
>>>>> apss_mpm: sram@1b8 {
>>>>> reg = <0x1b8 0x48>;
>>>>> };
>>>> Yes, the RPM M3 core. Other slices may be accessed
>>>> by any core at any time.
>>>
>>> Hmm, let me reword my question. Other than irq-qcom-mpm, is there any
>>> other Linux drivers that also need to request this slice region?
>> No.
>>
>>> Otherwise, I do not understand why devm_ioremap_resource() cannot be
>>> used.
>> drivers/rpmsg/qcom_glink_rpm.c calls devm_ioremap on the entire
>> RPM MSG RAM.
>
> Can we use devm_ioremap() too instead of ioremap() here?
Hm. Yes, we totally can!
Konrad
>
> Shawn
© 2016 - 2026 Red Hat, Inc.