The clock is not symmetrically disabled in the error-out routines.
Fixes: 109bd48ea2e1 ("iommu/msm: Add DT adaptation")
Cc: stable@vger.kernel.org
Cc: Sricharan R <sricharan@codeaurora.org>
Cc: Andy Gross <agross@kernel.org>
Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
Cc: Konrad Dybcio <konrad.dybcio@somainline.org>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
drivers/iommu/msm_iommu.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a24aa804ea3..a7d41ba4a47b 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -418,6 +418,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
list_for_each_entry(master, &iommu->ctx_list, list) {
if (master->num) {
dev_err(dev, "domain already attached");
+ __disable_clocks(iommu);
ret = -EEXIST;
goto fail;
}
@@ -425,6 +426,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
msm_iommu_alloc_ctx(iommu->context_map,
0, iommu->ncb);
if (IS_ERR_VALUE(master->num)) {
+ __disable_clocks(iommu);
ret = -ENODEV;
goto fail;
}
--
2.17.1
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: Thursday, September 15, 2022 3:56 PM
>
> The clock is not symmetrically disabled in the error-out routines.
>
> Fixes: 109bd48ea2e1 ("iommu/msm: Add DT adaptation")
> Cc: stable@vger.kernel.org
> Cc: Sricharan R <sricharan@codeaurora.org>
> Cc: Andy Gross <agross@kernel.org>
> Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
> Cc: Konrad Dybcio <konrad.dybcio@somainline.org>
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> ---
> drivers/iommu/msm_iommu.c | 2 ++
> 1 file changed, 2 insertions(+)
>
> diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
> index 6a24aa804ea3..a7d41ba4a47b 100644
> --- a/drivers/iommu/msm_iommu.c
> +++ b/drivers/iommu/msm_iommu.c
> @@ -418,6 +418,7 @@ static int msm_iommu_attach_dev(struct
> iommu_domain *domain, struct device *dev)
> list_for_each_entry(master, &iommu->ctx_list, list) {
> if (master->num) {
> dev_err(dev, "domain already
> attached");
> + __disable_clocks(iommu);
> ret = -EEXIST;
> goto fail;
> }
> @@ -425,6 +426,7 @@ static int msm_iommu_attach_dev(struct
> iommu_domain *domain, struct device *dev)
> msm_iommu_alloc_ctx(iommu-
> >context_map,
> 0, iommu->ncb);
> if (IS_ERR_VALUE(master->num)) {
> + __disable_clocks(iommu);
also need to free_ctx() for already walked nodes.
btw it's a bit weird that although this is coded as iterating over a list,
in reality there is at most one node per list. According to
insert_iommu_master() a master object is allocated and inserted
into the ctx_list only if the ctx_list is currently empty...
> ret = -ENODEV;
> goto fail;
> }
> --
> 2.17.1
On Tue, Sep 20, 2022 at 06:15:21AM +0000, Tian, Kevin wrote:
> External email: Use caution opening links or attachments
>
>
> > From: Nicolin Chen <nicolinc@nvidia.com>
> > Sent: Thursday, September 15, 2022 3:56 PM
> >
> > The clock is not symmetrically disabled in the error-out routines.
> >
> > Fixes: 109bd48ea2e1 ("iommu/msm: Add DT adaptation")
> > Cc: stable@vger.kernel.org
> > Cc: Sricharan R <sricharan@codeaurora.org>
> > Cc: Andy Gross <agross@kernel.org>
> > Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
> > Cc: Konrad Dybcio <konrad.dybcio@somainline.org>
> > Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> > ---
> > drivers/iommu/msm_iommu.c | 2 ++
> > 1 file changed, 2 insertions(+)
> >
> > diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
> > index 6a24aa804ea3..a7d41ba4a47b 100644
> > --- a/drivers/iommu/msm_iommu.c
> > +++ b/drivers/iommu/msm_iommu.c
> > @@ -418,6 +418,7 @@ static int msm_iommu_attach_dev(struct
> > iommu_domain *domain, struct device *dev)
> > list_for_each_entry(master, &iommu->ctx_list, list) {
> > if (master->num) {
> > dev_err(dev, "domain already
> > attached");
> > + __disable_clocks(iommu);
> > ret = -EEXIST;
> > goto fail;
> > }
> > @@ -425,6 +426,7 @@ static int msm_iommu_attach_dev(struct
> > iommu_domain *domain, struct device *dev)
> > msm_iommu_alloc_ctx(iommu-
> > >context_map,
> > 0, iommu->ncb);
> > if (IS_ERR_VALUE(master->num)) {
> > + __disable_clocks(iommu);
>
> also need to free_ctx() for already walked nodes.
Oooo...yes. Probably could reuse the detach() -- [1].
> btw it's a bit weird that although here is coded based on a list
> in reality there is at most one node per list. According to
> insert_iommu_master() a master object is allocated and inserted
> to the ctx_list only if the ctx_list is currently empty...
Yea. The insert_iommu_master() indicates that there would be only
one master on a ctx_list, while the rest of the driver tries
to take care of a potential multi-master per ctx_list case, which
practically won't happen by looking at the DT file. But the driver
existed for the legacy platform data configuration too, so I don't
intend to change too much...
Thanks!
Nic
[1]
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6a24aa804ea3..30c5662e24bc 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -394,6 +394,33 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_priv *priv = to_msm_priv(domain);
+ unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
+ int ret;
+
+ free_io_pgtable_ops(priv->iop);
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ msm_iommu_free_ctx(iommu->context_map, master->num);
+ __reset_context(iommu->base, master->num);
+ }
+ __disable_clocks(iommu);
+ }
+fail:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -418,13 +445,15 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
list_for_each_entry(master, &iommu->ctx_list, list) {
if (master->num) {
dev_err(dev, "domain already attached");
+ __disable_clocks(iommu);
ret = -EEXIST;
goto fail;
}
master->num =
msm_iommu_alloc_ctx(iommu->context_map,
0, iommu->ncb);
- if (IS_ERR_VALUE(master->num)) {
+ if (master->num < 0) {
+ __disable_clocks(iommu);
ret = -ENODEV;
goto fail;
}
@@ -439,37 +468,12 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+ if (ret)
+ msm_iommu_detach_dev(domain, dev);
return ret;
}
-static void msm_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
-{
- struct msm_priv *priv = to_msm_priv(domain);
- unsigned long flags;
- struct msm_iommu_dev *iommu;
- struct msm_iommu_ctx_dev *master;
- int ret;
-
- free_io_pgtable_ops(priv->iop);
-
- spin_lock_irqsave(&msm_iommu_lock, flags);
- list_for_each_entry(iommu, &priv->list_attached, dom_node) {
- ret = __enable_clocks(iommu);
- if (ret)
- goto fail;
-
- list_for_each_entry(master, &iommu->ctx_list, list) {
- msm_iommu_free_ctx(iommu->context_map, master->num);
- __reset_context(iommu->base, master->num);
- }
- __disable_clocks(iommu);
- }
-fail:
- spin_unlock_irqrestore(&msm_iommu_lock, flags);
-}
-
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: Wednesday, September 21, 2022 2:47 AM
>
> On Tue, Sep 20, 2022 at 06:15:21AM +0000, Tian, Kevin wrote:
> > External email: Use caution opening links or attachments
> >
> >
> > > From: Nicolin Chen <nicolinc@nvidia.com>
> > > Sent: Thursday, September 15, 2022 3:56 PM
> > >
> > > The clock is not symmetrically disabled in the error-out routines.
> > >
> > > Fixes: 109bd48ea2e1 ("iommu/msm: Add DT adaptation")
> > > Cc: stable@vger.kernel.org
> > > Cc: Sricharan R <sricharan@codeaurora.org>
> > > Cc: Andy Gross <agross@kernel.org>
> > > Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
> > > Cc: Konrad Dybcio <konrad.dybcio@somainline.org>
> > > Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
> > > ---
> > > drivers/iommu/msm_iommu.c | 2 ++
> > > 1 file changed, 2 insertions(+)
> > >
> > > diff --git a/drivers/iommu/msm_iommu.c
> b/drivers/iommu/msm_iommu.c
> > > index 6a24aa804ea3..a7d41ba4a47b 100644
> > > --- a/drivers/iommu/msm_iommu.c
> > > +++ b/drivers/iommu/msm_iommu.c
> > > @@ -418,6 +418,7 @@ static int msm_iommu_attach_dev(struct
> > > iommu_domain *domain, struct device *dev)
> > > list_for_each_entry(master, &iommu->ctx_list, list) {
> > > if (master->num) {
> > > dev_err(dev, "domain already
> > > attached");
> > > + __disable_clocks(iommu);
> > > ret = -EEXIST;
> > > goto fail;
> > > }
> > > @@ -425,6 +426,7 @@ static int msm_iommu_attach_dev(struct
> > > iommu_domain *domain, struct device *dev)
> > > msm_iommu_alloc_ctx(iommu-
> > > >context_map,
> > > 0, iommu->ncb);
> > > if (IS_ERR_VALUE(master->num)) {
> > > + __disable_clocks(iommu);
> >
> > also need to free_ctx() for already walked nodes.
>
> Oooo...yes. Probably could reuse the detach() -- [1].
>
> > btw it's a bit weird that although here is coded based on a list
> > in reality there is at most one node per list. According to
> > insert_iommu_master() a master object is allocated and inserted
> > to the ctx_list only if the ctx_list is currently empty...
>
> Yea. The insert_iommu_master() indicates that there would be only
> one master on a ctx_list, while the rest of the driver tries
> to take care of a potential multi-master per ctx_list case, which
> practically won't happen by looking at the DT file. But the driver
> existed for the legacy platform data configuration too, so I don't
> intend to change too much...
it's also problematic that it assumes that the attached device
must be the first master in the list. But anyway I agree to not
change too much given that mess.
>
> Thanks!
> Nic
>
> [1]
> diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
> index 6a24aa804ea3..30c5662e24bc 100644
> --- a/drivers/iommu/msm_iommu.c
> +++ b/drivers/iommu/msm_iommu.c
> @@ -394,6 +394,33 @@ static struct iommu_device
> *msm_iommu_probe_device(struct device *dev)
> return &iommu->iommu;
> }
>
> +static void msm_iommu_detach_dev(struct iommu_domain *domain,
> + struct device *dev)
> +{
> + struct msm_priv *priv = to_msm_priv(domain);
> + unsigned long flags;
> + struct msm_iommu_dev *iommu;
> + struct msm_iommu_ctx_dev *master;
> + int ret;
> +
> + free_io_pgtable_ops(priv->iop);
> +
> + spin_lock_irqsave(&msm_iommu_lock, flags);
> + list_for_each_entry(iommu, &priv->list_attached, dom_node) {
> + ret = __enable_clocks(iommu);
> + if (ret)
> + goto fail;
> +
> + list_for_each_entry(master, &iommu->ctx_list, list) {
> + msm_iommu_free_ctx(iommu->context_map,
> master->num);
> + __reset_context(iommu->base, master->num);
> + }
> + __disable_clocks(iommu);
> + }
> +fail:
> + spin_unlock_irqrestore(&msm_iommu_lock, flags);
> +}
> +
> static int msm_iommu_attach_dev(struct iommu_domain *domain, struct
> device *dev)
> {
> int ret = 0;
> @@ -418,13 +445,15 @@ static int msm_iommu_attach_dev(struct
> iommu_domain *domain, struct device *dev)
> list_for_each_entry(master, &iommu->ctx_list, list) {
> if (master->num) {
> dev_err(dev, "domain already
> attached");
> + __disable_clocks(iommu);
> ret = -EEXIST;
> goto fail;
> }
> master->num =
> msm_iommu_alloc_ctx(iommu-
> >context_map,
> 0, iommu->ncb);
> - if (IS_ERR_VALUE(master->num)) {
> + if (master->num < 0) {
> + __disable_clocks(iommu);
> ret = -ENODEV;
> goto fail;
> }
> @@ -439,37 +468,12 @@ static int msm_iommu_attach_dev(struct
> iommu_domain *domain, struct device *dev)
>
> fail:
> spin_unlock_irqrestore(&msm_iommu_lock, flags);
> + if (ret)
> + msm_iommu_detach_dev(domain, dev);
>
> return ret;
> }
>
> -static void msm_iommu_detach_dev(struct iommu_domain *domain,
> - struct device *dev)
> -{
> - struct msm_priv *priv = to_msm_priv(domain);
> - unsigned long flags;
> - struct msm_iommu_dev *iommu;
> - struct msm_iommu_ctx_dev *master;
> - int ret;
> -
> - free_io_pgtable_ops(priv->iop);
> -
> - spin_lock_irqsave(&msm_iommu_lock, flags);
> - list_for_each_entry(iommu, &priv->list_attached, dom_node) {
> - ret = __enable_clocks(iommu);
> - if (ret)
> - goto fail;
> -
> - list_for_each_entry(master, &iommu->ctx_list, list) {
> - msm_iommu_free_ctx(iommu->context_map,
> master->num);
> - __reset_context(iommu->base, master->num);
> - }
> - __disable_clocks(iommu);
> - }
> -fail:
> - spin_unlock_irqrestore(&msm_iommu_lock, flags);
> -}
> -
> static int msm_iommu_map(struct iommu_domain *domain, unsigned long
> iova,
> phys_addr_t pa, size_t len, int prot, gfp_t gfp)
> {
© 2016 - 2026 Red Hat, Inc.