Enable runtime PM in the mtd driver to notify the graphics driver that the
whole card should be kept awake while NVM operations are performed
through this driver.
CC: Lucas De Marchi <lucas.demarchi@intel.com>
Acked-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
---
drivers/mtd/devices/mtd-intel-dg.c | 79 +++++++++++++++++++++++++-----
1 file changed, 67 insertions(+), 12 deletions(-)
diff --git a/drivers/mtd/devices/mtd-intel-dg.c b/drivers/mtd/devices/mtd-intel-dg.c
index 230bf444b7fe..a84153812291 100644
--- a/drivers/mtd/devices/mtd-intel-dg.c
+++ b/drivers/mtd/devices/mtd-intel-dg.c
@@ -15,11 +15,14 @@
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/types.h>
+#define INTEL_DG_NVM_RPM_TIMEOUT 500
+
struct intel_dg_nvm {
struct kref refcnt;
struct mtd_info mtd;
@@ -460,6 +463,7 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
loff_t from;
size_t len;
size_t total_len;
+ int ret = 0;
if (WARN_ON(!nvm))
return -EINVAL;
@@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
total_len = info->len;
addr = info->addr;
+ ret = pm_runtime_resume_and_get(&mtd->dev);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
+ return ret;
+ }
+
guard(mutex)(&nvm->lock);
while (total_len > 0) {
if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
info->fail_addr = addr;
- return -ERANGE;
+ ret = -ERANGE;
+ goto out;
}
idx = idg_nvm_get_region(nvm, addr);
if (idx >= nvm->nregions) {
dev_err(&mtd->dev, "out of range");
info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
- return -ERANGE;
+ ret = -ERANGE;
+ goto out;
}
from = addr - nvm->regions[idx].offset;
@@ -503,14 +515,18 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
if (bytes < 0) {
dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
info->fail_addr += nvm->regions[idx].offset;
- return bytes;
+ ret = bytes;
+ goto out;
}
addr += len;
total_len -= len;
}
- return 0;
+out:
+ pm_runtime_mark_last_busy(&mtd->dev);
+ pm_runtime_put_autosuspend(&mtd->dev);
+ return ret;
}
static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -539,17 +555,25 @@ static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
if (len > nvm->regions[idx].size - from)
len = nvm->regions[idx].size - from;
+ ret = pm_runtime_resume_and_get(&mtd->dev);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+ return ret;
+ }
+
guard(mutex)(&nvm->lock);
ret = idg_read(nvm, region, from, len, buf);
if (ret < 0) {
dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
- return ret;
+ } else {
+ *retlen = ret;
+ ret = 0;
}
- *retlen = ret;
-
- return 0;
+ pm_runtime_mark_last_busy(&mtd->dev);
+ pm_runtime_put_autosuspend(&mtd->dev);
+ return ret;
}
static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -578,17 +602,25 @@ static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len > nvm->regions[idx].size - to)
len = nvm->regions[idx].size - to;
+ ret = pm_runtime_resume_and_get(&mtd->dev);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+ return ret;
+ }
+
guard(mutex)(&nvm->lock);
ret = idg_write(nvm, region, to, len, buf);
if (ret < 0) {
dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
- return ret;
+ } else {
+ *retlen = ret;
+ ret = 0;
}
- *retlen = ret;
-
- return 0;
+ pm_runtime_mark_last_busy(&mtd->dev);
+ pm_runtime_put_autosuspend(&mtd->dev);
+ return ret;
}
static void intel_dg_nvm_release(struct kref *kref)
@@ -670,6 +702,15 @@ static int intel_dg_nvm_init_mtd(struct intel_dg_nvm *nvm, struct device *device
kfree(parts);
+ if (ret)
+ goto out;
+
+ devm_pm_runtime_enable(&nvm->mtd.dev);
+
+ pm_runtime_set_autosuspend_delay(&nvm->mtd.dev, INTEL_DG_NVM_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(&nvm->mtd.dev);
+
+out:
return ret;
}
@@ -720,6 +761,17 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
n++;
}
+ devm_pm_runtime_enable(device);
+
+ pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(device);
+
+ ret = pm_runtime_resume_and_get(device);
+ if (ret < 0) {
+ dev_err(device, "rpm: get failed %d\n", ret);
+ goto err_norpm;
+ }
+
nvm->base = devm_ioremap_resource(device, &invm->bar);
if (IS_ERR(nvm->base)) {
dev_err(device, "mmio not mapped\n");
@@ -742,9 +794,12 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
dev_set_drvdata(&aux_dev->dev, nvm);
+ pm_runtime_put(device);
return 0;
err:
+ pm_runtime_put(device);
+err_norpm:
kref_put(&nvm->refcnt, intel_dg_nvm_release);
return ret;
}
--
2.43.0
On Wed, Jan 01, 2025 at 05:39:21PM +0200, Alexander Usyskin wrote:
> @@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
> total_len = info->len;
> addr = info->addr;
>
> + ret = pm_runtime_resume_and_get(&mtd->dev);
I'm glad we are not accessing the parent directly here anymore,
but to me it is still strange.
I feel that we should be using &aux_dev->dev; instead of mtd->dev.
What am I missing?
> -----Original Message-----
> From: Vivi, Rodrigo <rodrigo.vivi@intel.com>
> Sent: Wednesday, January 29, 2025 11:51 PM
> To: Usyskin, Alexander <alexander.usyskin@intel.com>
> Cc: Miquel Raynal <miquel.raynal@bootlin.com>; Richard Weinberger <richard@nod.at>; Vignesh Raghavendra <vigneshr@ti.com>; De Marchi, Lucas <lucas.demarchi@intel.com>; Thomas Hellström <thomas.hellstrom@linux.intel.com>; Maarten Lankhorst <maarten.lankhorst@linux.intel.com>; Maxime Ripard <mripard@kernel.org>; Thomas Zimmermann <tzimmermann@suse.de>; David Airlie <airlied@gmail.com>; Simona Vetter <simona@ffwll.ch>; Jani Nikula <jani.nikula@linux.intel.com>; Joonas Lahtinen <joonas.lahtinen@linux.intel.com>; Tvrtko Ursulin <tursulin@ursulin.net>; Poosa, Karthik <karthik.poosa@intel.com>; Abliyev, Reuven <reuven.abliyev@intel.com>; Weil, Oren jer <oren.jer.weil@intel.com>; linux-mtd@lists.infradead.org; dri-devel@lists.freedesktop.org; intel-gfx@lists.freedesktop.org; linux-kernel@vger.kernel.org
> Subject: Re: [PATCH v4 07/11] mtd: intel-dg: wake card on operations
> 
> On Wed, Jan 01, 2025 at 05:39:21PM +0200, Alexander Usyskin wrote:
> > + ret = pm_runtime_resume_and_get(&mtd->dev);
>
> I'm glad we are not accessing the parent directly here anymore,
> but to me it is still strange.
> I feel that we should be using &aux_dev->dev; instead of mtd->dev.
>
> What am I missing?
>
aux_dev->dev is the parent device stored in mtd->dev->parent.
In the previous version, when I accessed mtd->dev->parent, I was actually accessing aux_dev->dev.
Now we have a real mtd master device and can rely on its runtime PM.
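
For illustration, a minimal sketch of that dependency (not part of the patch; the
function name is made up), assuming the mtd master device is registered with the
auxiliary device as its parent, so a runtime-PM get on the child resumes the card
chain first:

#include <linux/mtd/mtd.h>
#include <linux/pm_runtime.h>

static int example_nvm_access(struct mtd_info *mtd)
{
        int ret;

        /* mtd->dev.parent is aux_dev->dev, so resuming the mtd device
         * also runtime-resumes the auxiliary device (and the card). */
        ret = pm_runtime_resume_and_get(&mtd->dev);
        if (ret < 0)
                return ret;

        /* ... perform the NVM access under nvm->lock here ... */

        pm_runtime_mark_last_busy(&mtd->dev);
        pm_runtime_put_autosuspend(&mtd->dev);
        return 0;
}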
On 30-01-2025 03:21, Rodrigo Vivi wrote:
> On Wed, Jan 01, 2025 at 05:39:21PM +0200, Alexander Usyskin wrote:
>> Enable runtime PM in the mtd driver to notify the graphics driver that the
>> whole card should be kept awake while NVM operations are performed
>> through this driver.
>>
Acked-by: Karthik Poosa <karthik.poosa@intel.com>