Add a new devm_iio_dmaengine_buffer_setup_ext2() function to handle
cases where the DMA channel is managed by the caller rather than being
requested and released by the iio_dmaengine module.
Signed-off-by: David Lechner <dlechner@baylibre.com>
---
v4 changes:
* This replaces "iio: buffer-dmaengine: generalize requesting DMA channel"
---
drivers/iio/buffer/industrialio-buffer-dmaengine.c | 107 +++++++++++++++------
include/linux/iio/buffer-dmaengine.h | 5 +
2 files changed, 81 insertions(+), 31 deletions(-)
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 054af21dfa65..602cb2e147a6 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -33,6 +33,7 @@ struct dmaengine_buffer {
struct iio_dma_buffer_queue queue;
struct dma_chan *chan;
+ bool owns_chan;
struct list_head active;
size_t align;
@@ -216,28 +217,23 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan,
+ bool owns_chan)
{
struct dmaengine_buffer *dmaengine_buffer;
unsigned int width, src_width, dest_width;
struct dma_slave_caps caps;
- struct dma_chan *chan;
int ret;
dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
- if (!dmaengine_buffer)
- return ERR_PTR(-ENOMEM);
-
- chan = dma_request_chan(dev, channel);
- if (IS_ERR(chan)) {
- ret = PTR_ERR(chan);
- goto err_free;
+ if (!dmaengine_buffer) {
+ ret = -ENOMEM;
+ goto err_release;
}
ret = dma_get_slave_caps(chan, &caps);
if (ret < 0)
- goto err_release;
+ goto err_free;
/* Needs to be aligned to the maximum of the minimums */
if (caps.src_addr_widths)
@@ -252,6 +248,7 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
INIT_LIST_HEAD(&dmaengine_buffer->active);
dmaengine_buffer->chan = chan;
+ dmaengine_buffer->owns_chan = owns_chan;
dmaengine_buffer->align = width;
dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);
@@ -263,10 +260,12 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
return &dmaengine_buffer->queue.buffer;
-err_release:
- dma_release_channel(chan);
err_free:
kfree(dmaengine_buffer);
+err_release:
+ if (owns_chan)
+ dma_release_channel(chan);
+
return ERR_PTR(ret);
}
@@ -282,12 +281,38 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_to_dmaengine_buffer(buffer);
iio_dma_buffer_exit(&dmaengine_buffer->queue);
- dma_release_channel(dmaengine_buffer->chan);
-
iio_buffer_put(buffer);
+
+ if (dmaengine_buffer->owns_chan)
+ dma_release_channel(dmaengine_buffer->chan);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+ struct dma_chan *chan, bool owns_chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ buffer = iio_dmaengine_buffer_alloc(chan, owns_chan);
+ if (IS_ERR(buffer))
+ return ERR_CAST(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ buffer->direction = dir;
+
+ ret = iio_device_attach_buffer(indio_dev, buffer);
+ if (ret) {
+ iio_dmaengine_buffer_free(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
+
/**
* iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
* @dev: DMA channel consumer device
@@ -308,24 +333,13 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
const char *channel,
enum iio_buffer_direction dir)
{
- struct iio_buffer *buffer;
- int ret;
-
- buffer = iio_dmaengine_buffer_alloc(dev, channel);
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
- buffer->direction = dir;
+ struct dma_chan *chan;
- ret = iio_device_attach_buffer(indio_dev, buffer);
- if (ret) {
- iio_dmaengine_buffer_free(buffer);
- return ERR_PTR(ret);
- }
+ chan = dma_request_chan(dev, channel);
+ if (IS_ERR(chan))
+ return ERR_CAST(chan);
- return buffer;
+ return __iio_dmaengine_buffer_setup_ext(indio_dev, chan, true, dir);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
@@ -362,6 +376,37 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
+/**
+ * devm_iio_dmaengine_buffer_setup_ext2() - Setup a DMA buffer for an IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
+ * caller manages requesting and releasing the DMA channel.
+ */
+int devm_iio_dmaengine_buffer_setup_ext2(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+
+ buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, false, dir);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+ buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext2, IIO_DMAENGINE_BUFFER);
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 81d9a19aeb91..7bdb979b59f2 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -11,6 +11,7 @@
struct iio_dev;
struct device;
+struct dma_chan;
void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
@@ -26,6 +27,10 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
enum iio_buffer_direction dir);
+int devm_iio_dmaengine_buffer_setup_ext2(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir);
#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
--
2.43.0
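
[Editorial usage illustration, not part of the patch: a driver that owns
its DMA channel could call the new helper roughly as follows. The function
name my_adc_probe and the channel name "rx" are hypothetical; only
devm_iio_dmaengine_buffer_setup_ext2() and its signature come from the
patch above.]

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/iio/buffer.h>
	#include <linux/iio/buffer-dmaengine.h>
	#include <linux/iio/iio.h>

	/* Hypothetical probe path: the caller requests the channel itself,
	 * so the iio_dmaengine core records owns_chan = false and never
	 * releases it; the caller stays responsible for
	 * dma_release_channel(). */
	static int my_adc_probe(struct device *dev, struct iio_dev *indio_dev)
	{
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = devm_iio_dmaengine_buffer_setup_ext2(dev, indio_dev, chan,
							   IIO_BUFFER_DIRECTION_IN);
		if (ret) {
			/* Setup failed with owns_chan = false, so the core
			 * did not release the channel; we must. */
			dma_release_channel(chan);
			return ret;
		}

		/* ... the channel must still be released on the driver's
		 * teardown path, e.g. in remove() or via a devm action ... */
		return 0;
	}
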
I still need to look better at this but I do have one thought already :)

On Wed, 2024-10-23 at 15:59 -0500, David Lechner wrote:
> Add a new devm_iio_dmaengine_buffer_setup_ext2() function to handle
> cases where the DMA channel is managed by the caller rather than being
> requested and released by the iio_dmaengine module.
>
> Signed-off-by: David Lechner <dlechner@baylibre.com>
> ---

...

> @@ -282,12 +281,38 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
> 	iio_buffer_to_dmaengine_buffer(buffer);
>
> 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
> -	dma_release_channel(dmaengine_buffer->chan);
> -
> 	iio_buffer_put(buffer);
> +
> +	if (dmaengine_buffer->owns_chan)
> +		dma_release_channel(dmaengine_buffer->chan);

Not sure if I agree much with this owns_chan flag. The way I see it, we
should always hand over the lifetime of the DMA channel to the IIO DMA
framework. Note that the device you pass in for requesting the
spi_offload's channel and for setting up the DMA buffer is the same (and I
suspect it always will be), so I would not go to the trouble.
And with this assumption we could simplify the spi implementation a bit
more.

Not really related, but I also suspect the current implementation could be
problematic. Basically, I suspect that the lifetime of the DMA channel
should be attached to the lifetime of the iio_buffer. IOW, we should only
release the channel in iio_dmaengine_buffer_release() - in which case the
current implementation with the spi_offload would also be buggy.

But bah, the second point is completely theoretical and likely very hard
to reproduce in real life (if reproducible at all - for now it's only
something I suspect).

- Nuno Sá
On 10/25/24 8:24 AM, Nuno Sá wrote:
> I still need to look better at this but I do have one thought already :)
>
> On Wed, 2024-10-23 at 15:59 -0500, David Lechner wrote:
>> Add a new devm_iio_dmaengine_buffer_setup_ext2() function to handle
>> cases where the DMA channel is managed by the caller rather than being
>> requested and released by the iio_dmaengine module.
>>
>> Signed-off-by: David Lechner <dlechner@baylibre.com>
>> ---

...

>> @@ -282,12 +281,38 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
>> 	iio_buffer_to_dmaengine_buffer(buffer);
>>
>> 	iio_dma_buffer_exit(&dmaengine_buffer->queue);
>> -	dma_release_channel(dmaengine_buffer->chan);
>> -
>> 	iio_buffer_put(buffer);
>> +
>> +	if (dmaengine_buffer->owns_chan)
>> +		dma_release_channel(dmaengine_buffer->chan);
>
> Not sure if I agree much with this owns_chan flag. The way I see it, we
> should always hand over the lifetime of the DMA channel to the IIO DMA
> framework. Note that the device you pass in for requesting the
> spi_offload's channel and for setting up the DMA buffer is the same (and
> I suspect it always will be), so I would not go to the trouble.

I tried something like this in v3 but Jonathan didn't seem to like it.

https://lore.kernel.org/all/20240727144303.4a8604cb@jic23-huawei/

> Not really related, but I also suspect the current implementation could
> be problematic. Basically, I suspect that the lifetime of the DMA channel
> should be attached to the lifetime of the iio_buffer. IOW, we should only
> release the channel in iio_dmaengine_buffer_release() - in which case the
> current implementation with the spi_offload would also be buggy.

The buffer can outlive the iio device driver that created the buffer?

> But bah, the second point is completely theoretical and likely very hard
> to reproduce in real life (if reproducible at all - for now it's only
> something I suspect).
>
> - Nuno Sá
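
[Editorial sketch of the release-ordering point discussed above, not from
the thread: a caller using the new helper can make the channel explicitly
outlive the buffer by registering the channel release as a devm action
before the buffer setup, since devm actions unwind in reverse registration
order. Same hypothetical names as the earlier sketch; this covers unbind
ordering within the driver, not the theoretical case of a buffer reference
held past driver removal.]

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/iio/buffer.h>
	#include <linux/iio/buffer-dmaengine.h>
	#include <linux/iio/iio.h>

	static void my_dma_release(void *data)
	{
		dma_release_channel(data);
	}

	/* Hypothetical variant of the probe above: the channel release
	 * action is registered first, so on unbind the buffer (whose devm
	 * action is registered later) is torn down before the channel is
	 * released. */
	static int my_adc_probe_devm(struct device *dev,
				     struct iio_dev *indio_dev)
	{
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "rx");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		/* On failure this releases the channel for us
		 * ("or_reset" semantics). */
		ret = devm_add_action_or_reset(dev, my_dma_release, chan);
		if (ret)
			return ret;

		return devm_iio_dmaengine_buffer_setup_ext2(dev, indio_dev, chan,
							    IIO_BUFFER_DIRECTION_IN);
	}
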