Use the macros defined in linux/cleanup.h to automate resource lifetime
control in the driver. A byproduct of this change is that the negative
conditionals are replaced with positive ones attached to the respective
calls, so the code follows the usual pattern of checking for the error
first.
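
To illustrate the pattern (a minimal sketch with made-up names such as
example_op(), example_lock and example_resource, not code taken from
this driver), guard(mutex)() acquires the mutex at the point of
declaration and releases it on every return path, so explicit
mutex_unlock() calls and unlock labels become unnecessary:

  #include <linux/cleanup.h>
  #include <linux/errno.h>
  #include <linux/mutex.h>

  static DEFINE_MUTEX(example_lock);
  static void *example_resource;	/* stands in for ipcdev */

  static int example_op(void)
  {
  	/* Locks here; unlocked automatically on every return path below */
  	guard(mutex)(&example_lock);

  	if (!example_resource)
  		return -ENODEV;	/* early return: mutex released by the guard */

  	/* ... use example_resource while holding the lock ... */
  	return 0;		/* normal return: mutex released by the guard */
  }

The conversion below applies the same idea to ipclock.
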
Tested-by: Ferry Toth <fntoth@gmail.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
---
drivers/platform/x86/intel_scu_ipc.c | 101 ++++++++++++---------------
1 file changed, 43 insertions(+), 58 deletions(-)
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 290b38627542..3a1584ed7db8 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -13,6 +13,7 @@
* along with other APIs.
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
@@ -99,23 +100,21 @@ static struct class intel_scu_ipc_class = {
*/
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
- struct intel_scu_ipc_dev *scu = NULL;
+ guard(mutex)(&ipclock);
- mutex_lock(&ipclock);
if (ipcdev) {
get_device(&ipcdev->dev);
/*
* Prevent the IPC provider from being unloaded while it
* is being used.
*/
- if (!try_module_get(ipcdev->owner))
- put_device(&ipcdev->dev);
- else
- scu = ipcdev;
+ if (try_module_get(ipcdev->owner))
+ return ipcdev;
+
+ put_device(&ipcdev->dev);
}
- mutex_unlock(&ipclock);
- return scu;
+ return NULL;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
@@ -289,12 +288,11 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
memset(cbuf, 0, sizeof(cbuf));
- mutex_lock(&ipclock);
+ guard(mutex)(&ipclock);
+
scu = intel_scu_ipc_get(scu);
- if (IS_ERR(scu)) {
- mutex_unlock(&ipclock);
+ if (IS_ERR(scu))
return PTR_ERR(scu);
- }
for (nc = 0; nc < count; nc++, offset += 2) {
cbuf[offset] = addr[nc];
@@ -319,13 +317,14 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
}
err = intel_scu_ipc_check_status(scu);
- if (!err) { /* Read rbuf */
- for (nc = 0, offset = 0; nc < 4; nc++, offset += 4)
- wbuf[nc] = ipc_data_readl(scu, offset);
- memcpy(data, wbuf, count);
- }
- mutex_unlock(&ipclock);
- return err;
+ if (err)
+ return err;
+
+ for (nc = 0, offset = 0; nc < 4; nc++, offset += 4)
+ wbuf[nc] = ipc_data_readl(scu, offset);
+ memcpy(data, wbuf, count);
+
+ return 0;
}
/**
@@ -446,17 +445,15 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
u32 cmdval;
int err;
- mutex_lock(&ipclock);
+ guard(mutex)(&ipclock);
+
scu = intel_scu_ipc_get(scu);
- if (IS_ERR(scu)) {
- mutex_unlock(&ipclock);
+ if (IS_ERR(scu))
return PTR_ERR(scu);
- }
cmdval = sub << 12 | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
- mutex_unlock(&ipclock);
if (err)
dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
return err;
@@ -485,18 +482,17 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
{
size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
- u32 cmdval, inbuf[4] = {};
+ u32 cmdval, inbuf[4] = {}, outbuf[4] = {};
int i, err;
if (inbuflen > 4 || outbuflen > 4)
return -EINVAL;
- mutex_lock(&ipclock);
+ guard(mutex)(&ipclock);
+
scu = intel_scu_ipc_get(scu);
- if (IS_ERR(scu)) {
- mutex_unlock(&ipclock);
+ if (IS_ERR(scu))
return PTR_ERR(scu);
- }
memcpy(inbuf, in, inlen);
for (i = 0; i < inbuflen; i++)
@@ -505,20 +501,17 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
cmdval = (size << 16) | (sub << 12) | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
-
- if (!err) {
- u32 outbuf[4] = {};
-
- for (i = 0; i < outbuflen; i++)
- outbuf[i] = ipc_data_readl(scu, 4 * i);
-
- memcpy(out, outbuf, outlen);
+ if (err) {
+ dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
+ return err;
}
- mutex_unlock(&ipclock);
- if (err)
- dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
- return err;
+ for (i = 0; i < outbuflen; i++)
+ outbuf[i] = ipc_data_readl(scu, 4 * i);
+
+ memcpy(out, outbuf, outlen);
+
+ return 0;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
@@ -572,18 +565,15 @@ __intel_scu_ipc_register(struct device *parent,
struct intel_scu_ipc_dev *scu;
void __iomem *ipc_base;
- mutex_lock(&ipclock);
+ guard(mutex)(&ipclock);
+
/* We support only one IPC */
- if (ipcdev) {
- err = -EBUSY;
- goto err_unlock;
- }
+ if (ipcdev)
+ return ERR_PTR(-EBUSY);
scu = kzalloc(sizeof(*scu), GFP_KERNEL);
- if (!scu) {
- err = -ENOMEM;
- goto err_unlock;
- }
+ if (!scu)
+ return ERR_PTR(-ENOMEM);
scu->owner = owner;
scu->dev.parent = parent;
@@ -621,13 +611,11 @@ __intel_scu_ipc_register(struct device *parent,
err = device_register(&scu->dev);
if (err) {
put_device(&scu->dev);
- goto err_unlock;
+ return ERR_PTR(err);
}
/* Assign device at last */
ipcdev = scu;
- mutex_unlock(&ipclock);
-
return scu;
err_unmap:
@@ -636,9 +624,6 @@ __intel_scu_ipc_register(struct device *parent,
release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));
err_free:
kfree(scu);
-err_unlock:
- mutex_unlock(&ipclock);
-
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
@@ -652,12 +637,12 @@ EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
*/
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
{
- mutex_lock(&ipclock);
+ guard(mutex)(&ipclock);
+
if (!WARN_ON(!ipcdev)) {
ipcdev = NULL;
device_unregister(&scu->dev);
}
- mutex_unlock(&ipclock);
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
--
2.43.0.rc1.1336.g36b5255a03ac
On Mon, 21 Oct 2024, Andy Shevchenko wrote:

> Use the macros defined in linux/cleanup.h to automate resource lifetime
> control in the driver. A byproduct of this change is that the negative
> conditionals are replaced with positive ones attached to the respective
> calls, so the code follows the usual pattern of checking for the error
> first.
>
> Tested-by: Ferry Toth <fntoth@gmail.com>
> Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>

...

IMO, this change is doing too many things at once and it's hard to justify
why those changes must be kept in the same patch. If the guard() change
is done first and only then the logic reversions, both patches would
probably be near trivial to review for correctness.

-- 
 i.
On Mon, Oct 21, 2024 at 12:32 PM Ilpo Järvinen
<ilpo.jarvinen@linux.intel.com> wrote:
> On Mon, 21 Oct 2024, Andy Shevchenko wrote:

...

> IMO, this change is doing too many things at once and it's hard to justify
> why those changes must be kept in the same patch. If the guard() change
> is done first and only then the logic reversions, both patches would
> probably be near trivial to review for correctness.

Are you insisting on this?
Because that's how I have done similar changes in the past all over
the kernel, and IIRC you are the first one asking for this :-)

-- 
With Best Regards,
Andy Shevchenko
On Mon, 21 Oct 2024, Andy Shevchenko wrote:

> On Mon, Oct 21, 2024 at 12:32 PM Ilpo Järvinen
> <ilpo.jarvinen@linux.intel.com> wrote:
> > On Mon, 21 Oct 2024, Andy Shevchenko wrote:
>
> ...
>
> > IMO, this change is doing too many things at once and it's hard to justify
> > why those changes must be kept in the same patch. If the guard() change
> > is done first and only then the logic reversions, both patches would
> > probably be near trivial to review for correctness.
>
> Are you insisting on this?
> Because that's how I have done similar changes in the past all over
> the kernel, and IIRC you are the first one asking for this :-)

Well, I know I could go through the patch as is and likely find out it's
correct. But as is, it clearly requires more effort than it would if those
two things were separated. The contexts would be much smaller and more
focused if you split this into two, and since you know the end result
(the current patch), the second patch is just the diff from the first to it.

I'm not saying it's always required, but TBH, this patch definitely would
be simpler to read if you split it into two. So to answer your question,
it's more of a judgement call than insisting on it always.

-- 
 i.
On Mon, Oct 21, 2024 at 11:38:52AM +0300, Andy Shevchenko wrote:
> Use the macros defined in linux/cleanup.h to automate resource lifetime
> control in the driver. A byproduct of this change is that the negative
> conditionals are replaced with positive ones attached to the respective
> calls, so the code follows the usual pattern of checking for the error
> first.
>
> Tested-by: Ferry Toth <fntoth@gmail.com>
> Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>

Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>