Use guard() and scoped_guard() to replace manual mutex lock/unlock
calls. This simplifies error handling and ensures RAII-style cleanup.
guard() is used in read_raw, write_raw, trig_reen, trigger_set_state,
suspend, and resume. Case blocks using guard() in read_raw and
write_raw are wrapped in braces at the case label level to ensure
clear scope for the cleanup guards.
scoped_guard() is used in remove and runtime_suspend where a short
mutex-protected scope is needed for a single function call.
The trigger_handler function is left unchanged as mixing guard() with
goto error paths can be fragile.
Signed-off-by: Neel Bullywon <neelb2403@gmail.com>
---
drivers/iio/magnetometer/bmc150_magn.c | 109 ++++++++++---------------
1 file changed, 41 insertions(+), 68 deletions(-)
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 6a73f6e2f1f0..ff6196c86e6b 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm.h>
@@ -453,33 +454,29 @@ static int bmc150_magn_read_raw(struct iio_dev *indio_dev,
s32 values[AXIS_XYZ_MAX];
switch (mask) {
- case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_RAW: {
if (iio_buffer_enabled(indio_dev))
return -EBUSY;
- mutex_lock(&data->mutex);
+
+ guard(mutex)(&data->mutex);
ret = bmc150_magn_set_power_state(data, true);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
+ if (ret < 0)
return ret;
- }
ret = bmc150_magn_read_xyz(data, values);
if (ret < 0) {
bmc150_magn_set_power_state(data, false);
- mutex_unlock(&data->mutex);
return ret;
}
*val = values[chan->scan_index];
ret = bmc150_magn_set_power_state(data, false);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
+ if (ret < 0)
return ret;
- }
- mutex_unlock(&data->mutex);
return IIO_VAL_INT;
+ }
case IIO_CHAN_INFO_SCALE:
/*
* The API/driver performs an off-chip temperature
@@ -527,48 +524,39 @@ static int bmc150_magn_write_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
+ case IIO_CHAN_INFO_SAMP_FREQ: {
if (val > data->max_odr)
return -EINVAL;
- mutex_lock(&data->mutex);
- ret = bmc150_magn_set_odr(data, val);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return bmc150_magn_set_odr(data, val);
+ }
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
switch (chan->channel2) {
case IIO_MOD_X:
- case IIO_MOD_Y:
+ case IIO_MOD_Y: {
if (val < 1 || val > 511)
return -EINVAL;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bmc150_magn_set_max_odr(data, val, 0, 0);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
+ if (ret < 0)
return ret;
- }
- ret = regmap_update_bits(data->regmap,
+ return regmap_update_bits(data->regmap,
BMC150_MAGN_REG_REP_XY,
BMC150_MAGN_REG_REP_DATAMASK,
- BMC150_MAGN_REPXY_TO_REGVAL
- (val));
- mutex_unlock(&data->mutex);
- return ret;
- case IIO_MOD_Z:
+ BMC150_MAGN_REPXY_TO_REGVAL(val));
+ }
+ case IIO_MOD_Z: {
if (val < 1 || val > 256)
return -EINVAL;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bmc150_magn_set_max_odr(data, 0, val, 0);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
+ if (ret < 0)
return ret;
- }
- ret = regmap_update_bits(data->regmap,
+ return regmap_update_bits(data->regmap,
BMC150_MAGN_REG_REP_Z,
BMC150_MAGN_REG_REP_DATAMASK,
- BMC150_MAGN_REPZ_TO_REGVAL
- (val));
- mutex_unlock(&data->mutex);
- return ret;
+ BMC150_MAGN_REPZ_TO_REGVAL(val));
+ }
default:
return -EINVAL;
}
@@ -782,9 +770,8 @@ static void bmc150_magn_trig_reen(struct iio_trigger *trig)
if (!data->dready_trigger_on)
return;
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = bmc150_magn_reset_intr(data);
- mutex_unlock(&data->mutex);
if (ret)
dev_err(data->dev, "Failed to reset interrupt\n");
}
@@ -794,32 +781,28 @@ static int bmc150_magn_data_rdy_trigger_set_state(struct iio_trigger *trig,
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct bmc150_magn_data *data = iio_priv(indio_dev);
- int ret = 0;
+ int ret;
+
+ guard(mutex)(&data->mutex);
- mutex_lock(&data->mutex);
if (state == data->dready_trigger_on)
- goto err_unlock;
+ return 0;
ret = regmap_update_bits(data->regmap, BMC150_MAGN_REG_INT_DRDY,
BMC150_MAGN_MASK_DRDY_EN,
state << BMC150_MAGN_SHIFT_DRDY_EN);
if (ret < 0)
- goto err_unlock;
+ return ret;
data->dready_trigger_on = state;
if (state) {
ret = bmc150_magn_reset_intr(data);
if (ret < 0)
- goto err_unlock;
+ return ret;
}
- mutex_unlock(&data->mutex);
return 0;
-
-err_unlock:
- mutex_unlock(&data->mutex);
- return ret;
}
static const struct iio_trigger_ops bmc150_magn_trigger_ops = {
@@ -980,9 +963,8 @@ void bmc150_magn_remove(struct device *dev)
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
- mutex_lock(&data->mutex);
- bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
}
@@ -995,10 +977,9 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
- mutex_lock(&data->mutex);
- ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
- true);
- mutex_unlock(&data->mutex);
+ scoped_guard(mutex, &data->mutex)
+ ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
+ true);
if (ret < 0) {
dev_err(dev, "powering off device failed\n");
return ret;
@@ -1024,28 +1005,20 @@ static int bmc150_magn_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&data->mutex);
- ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
- true);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
+ true);
}
static int bmc150_magn_resume(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
- int ret;
-
- mutex_lock(&data->mutex);
- ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
- true);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
+ true);
}
#endif
--
2.44.0
On Sun, Feb 15, 2026 at 08:54:52PM -0500, Neel Bullywon wrote:
> Use guard() and scoped_guard() to replace manual mutex lock/unlock
> calls. This simplifies error handling and ensures RAII-style cleanup.
>
> guard() is used in read_raw, write_raw, trig_reen, trigger_set_state,
> suspend, and resume. Case blocks using guard() in read_raw and
> write_raw are wrapped in braces at the case label level to ensure
> clear scope for the cleanup guards.
>
> scoped_guard() is used in remove and runtime_suspend where a short
> mutex-protected scope is needed for a single function call.
>
> The trigger_handler function is left unchanged as mixing guard() with
> goto error paths can be fragile.
...
> - mutex_lock(&data->mutex);
> - bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
> - mutex_unlock(&data->mutex);
> + scoped_guard(mutex, &data->mutex)
> + bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
Wonder if we can move this to a helper:
static int bmc150_magn_set_power_mode_locked(data, mode) // locked or unlocked, dunno with proper naming
{
guard(mutex)(&data->mutex);
return bmc150_magn_set_power_mode(data, mode, true);
}
...
> - ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> - true);
> - mutex_unlock(&data->mutex);
> + scoped_guard(mutex, &data->mutex)
> + ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> + true);
> if (ret < 0) {
> dev_err(dev, "powering off device failed\n");
> return ret;
> }
Ditto.
...
> {
> struct iio_dev *indio_dev = dev_get_drvdata(dev);
> struct bmc150_magn_data *data = iio_priv(indio_dev);
> - int ret;
> -
> - mutex_lock(&data->mutex);
> - ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> - true);
> - mutex_unlock(&data->mutex);
>
> - return ret;
> + guard(mutex)(&data->mutex);
> + return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> + true);
Ditto.
> }
>
> static int bmc150_magn_resume(struct device *dev)
> {
> struct iio_dev *indio_dev = dev_get_drvdata(dev);
> struct bmc150_magn_data *data = iio_priv(indio_dev);
> - int ret;
> -
> - mutex_lock(&data->mutex);
> - ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
> - true);
> - mutex_unlock(&data->mutex);
>
> - return ret;
> + guard(mutex)(&data->mutex);
> + return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
> + true);
Ditto.
> }
--
With Best Regards,
Andy Shevchenko
On Mon, 16 Feb 2026 10:33:25 +0200
Andy Shevchenko <andriy.shevchenko@intel.com> wrote:
> On Sun, Feb 15, 2026 at 08:54:52PM -0500, Neel Bullywon wrote:
> > Use guard() and scoped_guard() to replace manual mutex lock/unlock
> > calls. This simplifies error handling and ensures RAII-style cleanup.
> >
> > guard() is used in read_raw, write_raw, trig_reen, trigger_set_state,
> > suspend, and resume. Case blocks using guard() in read_raw and
> > write_raw are wrapped in braces at the case label level to ensure
> > clear scope for the cleanup guards.
> >
> > scoped_guard() is used in remove and runtime_suspend where a short
> > mutex-protected scope is needed for a single function call.
> >
> > The trigger_handler function is left unchanged as mixing guard() with
> > goto error paths can be fragile.
>
> ...
>
> > - mutex_lock(&data->mutex);
> > - bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
> > - mutex_unlock(&data->mutex);
> > + scoped_guard(mutex, &data->mutex)
> > + bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
>
> Wonder if we can move this to a helper:
>
> static int bmc150_magn_set_power_mode_locked(data, mode) // locked or unlocked, dunno with proper naming
> {
> guard(mutex)(&data->mutex);
> return bmc150_magn_set_power_mode(data, mode, true);
> }
>
> ...
Whilst it involves taking the mutex in probe() before it's strictly
necessary (as everything is serialized anyway until we register the
userspace interfaces), I wonder if a simpler option is to rely on the
mutex_init() being early enough and just put the guard unconditionally
in bmc150_magn_set_power_mode()?
The snag is in runtime PM. The locking in there is really odd
(smells broken to me).
It requires the lock to already be held for resume, but must not
be held for runtime_suspend. Superficially I suspect this only works
because the autosuspend delay gets us past the unlock in the suspend path.
I think we can unwind that mess though by changing read_raw() to
not hold the lock across both power and reading phases (I don't think
it matters if we drop it and grab it again in quick succession as
read_raw is never really a fast path).
The usage of runtime PM in buffer_preenable / postdisable should be
fine, but at this point I'm getting nervous. Neel, do you have the
hardware for this one to check that we don't break anything trying to
clean this up? Or is there anyone else who is willing to test any changes?
Jonathan
>
> > - ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> > - true);
> > - mutex_unlock(&data->mutex);
> > + scoped_guard(mutex, &data->mutex)
> > + ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> > + true);
> > if (ret < 0) {
> > dev_err(dev, "powering off device failed\n");
> > return ret;
> > }
>
> Ditto.
>
>
> ...
>
> > {
> > struct iio_dev *indio_dev = dev_get_drvdata(dev);
> > struct bmc150_magn_data *data = iio_priv(indio_dev);
> > - int ret;
> > -
> > - mutex_lock(&data->mutex);
> > - ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> > - true);
> > - mutex_unlock(&data->mutex);
> >
> > - return ret;
> > + guard(mutex)(&data->mutex);
> > + return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SLEEP,
> > + true);
>
> Ditto.
>
> > }
> >
> > static int bmc150_magn_resume(struct device *dev)
> > {
> > struct iio_dev *indio_dev = dev_get_drvdata(dev);
> > struct bmc150_magn_data *data = iio_priv(indio_dev);
> > - int ret;
> > -
> > - mutex_lock(&data->mutex);
> > - ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
> > - true);
> > - mutex_unlock(&data->mutex);
> >
> > - return ret;
> > + guard(mutex)(&data->mutex);
> > + return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
> > + true);
>
> Ditto.
>
> > }
>
I don't have the BMC150 hardware unfortunately, so I can't test runtime PM behavior changes directly. For the v8 of patch 1, I see two approaches based on the feedback given: Andy's suggestion: add a bmc150_magn_set_power_mode_locked() helper to deduplicate the lock+call pattern in remove, runtime_suspend, suspend, and resume. Your suggestion: move the guard() unconditionally into bmc150_magn_set_power_mode() itself, which is cleaner but touches the runtime PM locking that you flagged as suspicious. Given I can't test runtime PM, would you prefer I go with Andy's suggestion for now, and leave the deeper runtime PM cleanup for a separate series if someone with hardware can verify it? Or if you'd rather I take a shot at yours with the read_raw() rework you described, I'm happy to do that — just would need someone to test it.
© 2016 - 2026 Red Hat, Inc.