From: Radu Sabau <radu.sabau@analog.com>
Add SPI offload support to enable DMA-based, CPU-independent data
acquisition using the SPI Engine offload framework.
When an SPI offload is available (devm_spi_offload_get() succeeds),
the driver registers a DMA engine IIO buffer and uses dedicated buffer
setup operations. If no offload is available the existing software
triggered buffer path is used unchanged.
Both CNV Burst Mode and Manual Mode support offload, but use different
trigger mechanisms:
CNV Burst Mode: the SPI Engine is triggered by the ADC's DATA_READY
signal on the GP pin specified by the trigger-source consumer reference
in the device tree (one cell = GP pin number 0-3). For this mode the
driver acts as both an SPI offload consumer (DMA RX stream, message
optimization) and a trigger source provider: it registers the
GP/DATA_READY output via devm_spi_offload_trigger_register() so the
offload framework can match the '#trigger-source-cells' phandle and
automatically fire the SPI Engine DMA transfer at end-of-conversion.
Manual Mode: the SPI Engine is triggered by a periodic trigger at
the configured sampling frequency. The pre-built SPI message uses
the pipelined CNV-on-CS protocol: N+1 4-byte transfers are issued
for N active channels (the first result is discarded as garbage from
the pipeline flush) and the remaining N results are captured by DMA.
All offload transfers use 32-bit frames (bits_per_word=32, len=4) for
DMA word alignment. This patch promotes the channel scan_type from
storagebits=16 (triggered-buffer path) to storagebits=32 to match the
DMA word size; the triggered-buffer paths are updated to the same layout
for consistency. CNV Burst Mode channel data arrives in the lower 16
bits of the 32-bit word (shift=0); Manual Mode data arrives in the upper
16 bits (shift=16), matching the 4-byte SPI transfer layout
[data_hi, data_lo, 0, 0]. A separate ad4691_manual_channels[] array
encodes the shift=16 scan type for manual mode.
Kconfig gains a dependency on IIO_BUFFER_DMAENGINE.
Signed-off-by: Radu Sabau <radu.sabau@analog.com>
---
drivers/iio/adc/Kconfig | 1 +
drivers/iio/adc/ad4691.c | 470 +++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 435 insertions(+), 36 deletions(-)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index d498f16c0816..93f090e9a562 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -144,6 +144,7 @@ config AD4691
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IIO_BUFFER_DMAENGINE
select REGMAP
help
Say yes here to build support for Analog Devices AD4691 Family MuxSAR
diff --git a/drivers/iio/adc/ad4691.c b/drivers/iio/adc/ad4691.c
index db776de32846..5e0fe993c17d 100644
--- a/drivers/iio/adc/ad4691.c
+++ b/drivers/iio/adc/ad4691.c
@@ -8,6 +8,7 @@
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/math.h>
@@ -19,10 +20,14 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
#include <linux/units.h>
#include <linux/unaligned.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dma.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -37,6 +42,7 @@
#define AD4691_VREF_4P096_uV_MAX 4500000
#define AD4691_CNV_DUTY_CYCLE_NS 380
+#define AD4691_CNV_HIGH_TIME_NS 430
#define AD4691_SPI_CONFIG_A_REG 0x000
#define AD4691_SW_RESET (BIT(7) | BIT(0))
@@ -89,6 +95,12 @@
#define AD4691_ACC_IN(n) (0x252 + (3 * (n)))
#define AD4691_ACC_STS_DATA(n) (0x283 + (4 * (n)))
+/* SPI offload 32-bit word field masks (transmitted MSB first) */
+#define AD4691_OFFLOAD_BITS_PER_WORD 32
+#define AD4691_MSG_ADDR_HI GENMASK(31, 24)
+#define AD4691_MSG_ADDR_LO GENMASK(23, 16)
+#define AD4691_MSG_DATA GENMASK(15, 8)
+
enum ad4691_ref_ctrl {
AD4691_VREF_2P5 = 0,
AD4691_VREF_3P0 = 1,
@@ -99,12 +111,22 @@ enum ad4691_ref_ctrl {
struct ad4691_chip_info {
const struct iio_chan_spec *channels;
+ const struct iio_chan_spec *manual_channels;
const char *name;
unsigned int num_channels;
unsigned int max_rate;
};
-#define AD4691_CHANNEL(ch) \
+/*
+ * 16-bit ADC data is stored in 32-bit slots to match the SPI offload DMA
+ * word size (32 bits per transfer). The shift reflects the data position
+ * within the 32-bit word:
+ * CNV_BURST: RX = [dummy, dummy, data_hi, data_lo] -> shift = 0
+ * MANUAL: RX = [data_hi, data_lo, dummy, dummy] -> shift = 16
+ * The triggered-buffer paths store data in the same position for consistency.
+ * Do not "fix" storagebits to 16.
+ */
+#define AD4691_CHANNEL(ch, _shift) \
{ \
.type = IIO_VOLTAGE, \
.indexed = 1, \
@@ -118,40 +140,72 @@ struct ad4691_chip_info {
.scan_type = { \
.sign = 'u', \
.realbits = 16, \
- .storagebits = 16, \
- .shift = 0, \
+ .storagebits = 32, \
+ .shift = _shift, \
}, \
}
static const struct iio_chan_spec ad4691_channels[] = {
- AD4691_CHANNEL(0),
- AD4691_CHANNEL(1),
- AD4691_CHANNEL(2),
- AD4691_CHANNEL(3),
- AD4691_CHANNEL(4),
- AD4691_CHANNEL(5),
- AD4691_CHANNEL(6),
- AD4691_CHANNEL(7),
- AD4691_CHANNEL(8),
- AD4691_CHANNEL(9),
- AD4691_CHANNEL(10),
- AD4691_CHANNEL(11),
- AD4691_CHANNEL(12),
- AD4691_CHANNEL(13),
- AD4691_CHANNEL(14),
- AD4691_CHANNEL(15),
+ AD4691_CHANNEL(0, 0),
+ AD4691_CHANNEL(1, 0),
+ AD4691_CHANNEL(2, 0),
+ AD4691_CHANNEL(3, 0),
+ AD4691_CHANNEL(4, 0),
+ AD4691_CHANNEL(5, 0),
+ AD4691_CHANNEL(6, 0),
+ AD4691_CHANNEL(7, 0),
+ AD4691_CHANNEL(8, 0),
+ AD4691_CHANNEL(9, 0),
+ AD4691_CHANNEL(10, 0),
+ AD4691_CHANNEL(11, 0),
+ AD4691_CHANNEL(12, 0),
+ AD4691_CHANNEL(13, 0),
+ AD4691_CHANNEL(14, 0),
+ AD4691_CHANNEL(15, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+};
+
+static const struct iio_chan_spec ad4691_manual_channels[] = {
+ AD4691_CHANNEL(0, 16),
+ AD4691_CHANNEL(1, 16),
+ AD4691_CHANNEL(2, 16),
+ AD4691_CHANNEL(3, 16),
+ AD4691_CHANNEL(4, 16),
+ AD4691_CHANNEL(5, 16),
+ AD4691_CHANNEL(6, 16),
+ AD4691_CHANNEL(7, 16),
+ AD4691_CHANNEL(8, 16),
+ AD4691_CHANNEL(9, 16),
+ AD4691_CHANNEL(10, 16),
+ AD4691_CHANNEL(11, 16),
+ AD4691_CHANNEL(12, 16),
+ AD4691_CHANNEL(13, 16),
+ AD4691_CHANNEL(14, 16),
+ AD4691_CHANNEL(15, 16),
IIO_CHAN_SOFT_TIMESTAMP(16),
};
static const struct iio_chan_spec ad4693_channels[] = {
- AD4691_CHANNEL(0),
- AD4691_CHANNEL(1),
- AD4691_CHANNEL(2),
- AD4691_CHANNEL(3),
- AD4691_CHANNEL(4),
- AD4691_CHANNEL(5),
- AD4691_CHANNEL(6),
- AD4691_CHANNEL(7),
+ AD4691_CHANNEL(0, 0),
+ AD4691_CHANNEL(1, 0),
+ AD4691_CHANNEL(2, 0),
+ AD4691_CHANNEL(3, 0),
+ AD4691_CHANNEL(4, 0),
+ AD4691_CHANNEL(5, 0),
+ AD4691_CHANNEL(6, 0),
+ AD4691_CHANNEL(7, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+};
+
+static const struct iio_chan_spec ad4693_manual_channels[] = {
+ AD4691_CHANNEL(0, 16),
+ AD4691_CHANNEL(1, 16),
+ AD4691_CHANNEL(2, 16),
+ AD4691_CHANNEL(3, 16),
+ AD4691_CHANNEL(4, 16),
+ AD4691_CHANNEL(5, 16),
+ AD4691_CHANNEL(6, 16),
+ AD4691_CHANNEL(7, 16),
IIO_CHAN_SOFT_TIMESTAMP(16),
};
@@ -181,6 +235,7 @@ static const unsigned int ad4691_osc_freqs[] = {
static const struct ad4691_chip_info ad4691_chip_info = {
.channels = ad4691_channels,
+ .manual_channels = ad4691_manual_channels,
.name = "ad4691",
.num_channels = ARRAY_SIZE(ad4691_channels),
.max_rate = 500 * HZ_PER_KHZ,
@@ -188,6 +243,7 @@ static const struct ad4691_chip_info ad4691_chip_info = {
static const struct ad4691_chip_info ad4692_chip_info = {
.channels = ad4691_channels,
+ .manual_channels = ad4691_manual_channels,
.name = "ad4692",
.num_channels = ARRAY_SIZE(ad4691_channels),
.max_rate = 1 * HZ_PER_MHZ,
@@ -195,6 +251,7 @@ static const struct ad4691_chip_info ad4692_chip_info = {
static const struct ad4691_chip_info ad4693_chip_info = {
.channels = ad4693_channels,
+ .manual_channels = ad4693_manual_channels,
.name = "ad4693",
.num_channels = ARRAY_SIZE(ad4693_channels),
.max_rate = 500 * HZ_PER_KHZ,
@@ -202,6 +259,7 @@ static const struct ad4691_chip_info ad4693_chip_info = {
static const struct ad4691_chip_info ad4694_chip_info = {
.channels = ad4693_channels,
+ .manual_channels = ad4693_manual_channels,
.name = "ad4694",
.num_channels = ARRAY_SIZE(ad4693_channels),
.max_rate = 1 * HZ_PER_MHZ,
@@ -227,9 +285,9 @@ struct ad4691_state {
*/
struct mutex lock;
/*
- * Per-buffer-enabl ree lifetimesources:
- * Manual Mode - a pre-built SPI message that clocks out N+1
- * transfers in one go.
+ * Per-buffer-enable lifetime resources (triggered-buffer paths):
+ * Manual Mode - a pre-built SPI message that clocks out N+1
+ * transfers in one go.
* CNV Burst Mode - a pre-built SPI message that clocks out 2*N
* transfers in one go.
*/
@@ -238,9 +296,19 @@ struct ad4691_state {
struct spi_transfer *scan_xfers;
__be16 *scan_tx;
__be16 *scan_rx;
- /* Scan buffer: one slot per channel (u16) plus timestamp */
+ /* SPI offload DMA path resources */
+ struct spi_offload *offload;
+ /* SPI offload trigger - periodic (MANUAL) or DATA_READY (CNV_BURST) */
+ struct spi_offload_trigger *offload_trigger;
+ u64 offload_trigger_hz;
+ struct spi_message offload_msg;
+ /* Max 16 channel xfers + 1 state-reset or NOOP */
+ struct spi_transfer offload_xfer[17];
+ __be32 offload_tx_cmd[17];
+ __be32 offload_tx_reset;
+ /* Scan buffer: one slot per channel (u32) plus timestamp */
struct {
- u16 vals[16];
+ u32 vals[16];
s64 ts __aligned(8);
} scan __aligned(IIO_DMA_MINALIGN);
};
@@ -260,6 +328,46 @@ static int ad4691_gpio_setup(struct ad4691_state *st, unsigned int gp_num)
AD4691_GP_MODE_DATA_READY << shift);
}
+static const struct spi_offload_config ad4691_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
+};
+
+static bool ad4691_offload_trigger_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ return type == SPI_OFFLOAD_TRIGGER_DATA_READY &&
+ nargs == 1 && args[0] <= 3;
+}
+
+static int ad4691_offload_trigger_request(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ struct ad4691_state *st = spi_offload_trigger_get_priv(trigger);
+
+ if (nargs != 1)
+ return -EINVAL;
+
+ return ad4691_gpio_setup(st, (unsigned int)args[0]);
+}
+
+static int ad4691_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ if (config->type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct spi_offload_trigger_ops ad4691_offload_trigger_ops = {
+ .match = ad4691_offload_trigger_match,
+ .request = ad4691_offload_trigger_request,
+ .validate = ad4691_offload_trigger_validate,
+};
+
static void ad4691_disable_pwm(void *data)
{
struct pwm_state state = { .enabled = false };
@@ -817,6 +925,206 @@ static const struct iio_buffer_setup_ops ad4691_cnv_burst_buffer_setup_ops = {
.postdisable = &ad4691_cnv_burst_buffer_postdisable,
};
+static int ad4691_manual_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad4691_state *st = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->regmap);
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ };
+ unsigned int bit, k;
+ int ret;
+
+ ret = ad4691_enter_conversion_mode(st);
+ if (ret)
+ return ret;
+
+ memset(st->offload_xfer, 0, sizeof(st->offload_xfer));
+
+ /*
+ * N+1 transfers for N channels. Each CS-low period triggers
+ * a conversion AND returns the previous result (pipelined).
+ * TX: [AD4691_ADC_CHAN(n), 0x00, 0x00, 0x00]
+ * RX: [data_hi, data_lo, 0x00, 0x00] (shift=16)
+ * Transfer 0 RX is garbage; transfers 1..N carry real data.
+ */
+ k = 0;
+ iio_for_each_active_channel(indio_dev, bit) {
+ st->offload_tx_cmd[k] =
+ cpu_to_be32(FIELD_PREP(AD4691_MSG_ADDR_HI,
+ AD4691_ADC_CHAN(bit)));
+ st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
+ st->offload_xfer[k].len = sizeof(u32);
+ st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
+ st->offload_xfer[k].cs_change = 1;
+ st->offload_xfer[k].cs_change_delay.value = AD4691_CNV_HIGH_TIME_NS;
+ st->offload_xfer[k].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ /* First transfer RX is garbage — skip it. */
+ if (k > 0)
+ st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ k++;
+ }
+
+ /* Final NOOP to flush pipeline and capture last channel. */
+ st->offload_tx_cmd[k] =
+ cpu_to_be32(FIELD_PREP(AD4691_MSG_ADDR_HI, AD4691_NOOP));
+ st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
+ st->offload_xfer[k].len = sizeof(u32);
+ st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
+ st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ k++;
+
+ spi_message_init_with_transfers(&st->offload_msg, st->offload_xfer, k);
+ st->offload_msg.offload = st->offload;
+
+ ret = spi_optimize_message(spi, &st->offload_msg);
+ if (ret)
+ goto err_exit_conversion;
+
+ config.periodic.frequency_hz = st->offload_trigger_hz;
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
+ if (ret)
+ goto err_unoptimize;
+
+ return 0;
+
+err_unoptimize:
+ spi_unoptimize_message(&st->offload_msg);
+err_exit_conversion:
+ ad4691_exit_conversion_mode(st);
+ return ret;
+}
+
+static int ad4691_manual_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad4691_state *st = iio_priv(indio_dev);
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+ spi_unoptimize_message(&st->offload_msg);
+
+ return ad4691_exit_conversion_mode(st);
+}
+
+static const struct iio_buffer_setup_ops ad4691_manual_offload_buffer_setup_ops = {
+ .postenable = &ad4691_manual_offload_buffer_postenable,
+ .predisable = &ad4691_manual_offload_buffer_predisable,
+};
+
+static int ad4691_cnv_burst_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad4691_state *st = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->regmap);
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
+ };
+ unsigned int n_active = hweight_long(*indio_dev->active_scan_mask);
+ unsigned int bit, k;
+ int ret;
+
+ ret = regmap_write(st->regmap, AD4691_ACC_MASK_REG,
+ (u16)~(*indio_dev->active_scan_mask));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4691_STD_SEQ_CONFIG,
+ *indio_dev->active_scan_mask);
+ if (ret)
+ return ret;
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ ret = regmap_write(st->regmap, AD4691_ACC_COUNT_LIMIT(bit),
+ AD4691_ACC_COUNT_VAL);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad4691_enter_conversion_mode(st);
+ if (ret)
+ return ret;
+
+ memset(st->offload_xfer, 0, sizeof(st->offload_xfer));
+
+ /*
+ * N transfers to read N AVG_IN registers plus one state-reset
+ * transfer (no RX) to re-arm DATA_READY.
+ * TX: [reg_hi | 0x80, reg_lo, 0x00, 0x00]
+ * RX: [0x00, 0x00, data_hi, data_lo] (shift=0)
+ */
+ k = 0;
+ iio_for_each_active_channel(indio_dev, bit) {
+ unsigned int reg = AD4691_AVG_IN(bit);
+
+ st->offload_tx_cmd[k] =
+ cpu_to_be32(((reg >> 8 | 0x80) << 24) |
+ ((reg & 0xFF) << 16));
+ st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
+ st->offload_xfer[k].len = sizeof(u32);
+ st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
+ st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ if (k < n_active - 1)
+ st->offload_xfer[k].cs_change = 1;
+ k++;
+ }
+
+ /* State reset to re-arm DATA_READY for the next scan. */
+ st->offload_tx_reset =
+ cpu_to_be32(((AD4691_STATE_RESET_REG >> 8) << 24) |
+ ((AD4691_STATE_RESET_REG & 0xFF) << 16) |
+ (AD4691_STATE_RESET_ALL << 8));
+ st->offload_xfer[k].tx_buf = &st->offload_tx_reset;
+ st->offload_xfer[k].len = sizeof(u32);
+ st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
+ k++;
+
+ spi_message_init_with_transfers(&st->offload_msg, st->offload_xfer, k);
+ st->offload_msg.offload = st->offload;
+
+ ret = spi_optimize_message(spi, &st->offload_msg);
+ if (ret)
+ goto err_exit_conversion;
+
+ ret = ad4691_sampling_enable(st, true);
+ if (ret)
+ goto err_unoptimize;
+
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
+ if (ret)
+ goto err_sampling_disable;
+
+ return 0;
+
+err_sampling_disable:
+ ad4691_sampling_enable(st, false);
+err_unoptimize:
+ spi_unoptimize_message(&st->offload_msg);
+err_exit_conversion:
+ ad4691_exit_conversion_mode(st);
+ return ret;
+}
+
+static int ad4691_cnv_burst_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad4691_state *st = iio_priv(indio_dev);
+ int ret;
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ ret = ad4691_sampling_enable(st, false);
+ if (ret)
+ return ret;
+
+ spi_unoptimize_message(&st->offload_msg);
+
+ return ad4691_exit_conversion_mode(st);
+}
+
+static const struct iio_buffer_setup_ops ad4691_cnv_burst_offload_buffer_setup_ops = {
+ .postenable = &ad4691_cnv_burst_offload_buffer_postenable,
+ .predisable = &ad4691_cnv_burst_offload_buffer_predisable,
+};
+
static ssize_t sampling_frequency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -824,6 +1132,9 @@ static ssize_t sampling_frequency_show(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad4691_state *st = iio_priv(indio_dev);
+ if (st->manual_mode && st->offload)
+ return sysfs_emit(buf, "%llu\n", st->offload_trigger_hz);
+
if (st->manual_mode)
return -ENODEV;
@@ -838,7 +1149,7 @@ static ssize_t sampling_frequency_store(struct device *dev,
struct ad4691_state *st = iio_priv(indio_dev);
int freq, ret;
- if (st->manual_mode)
+ if (st->manual_mode && !st->offload)
return -ENODEV;
ret = kstrtoint(buf, 10, &freq);
@@ -847,6 +1158,20 @@ static ssize_t sampling_frequency_store(struct device *dev,
guard(mutex)(&st->lock);
+ if (st->manual_mode) {
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = { .frequency_hz = freq },
+ };
+
+ ret = spi_offload_trigger_validate(st->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ st->offload_trigger_hz = config.periodic.frequency_hz;
+ return len;
+ }
+
ret = ad4691_set_pwm_freq(st, freq);
if (ret)
return ret;
@@ -900,7 +1225,7 @@ static irqreturn_t ad4691_trigger_handler(int irq, void *p)
if (st->manual_mode) {
iio_for_each_active_channel(indio_dev, i) {
- st->scan.vals[i] = be16_to_cpu(st->scan_rx[k + 1]);
+ st->scan.vals[i] = (u32)be16_to_cpu(st->scan_rx[k + 1]) << 16;
k++;
}
} else {
@@ -1088,6 +1413,15 @@ static int ad4691_config(struct ad4691_state *st, u32 max_speed_hz)
if (st->manual_mode)
return 0;
+ /*
+ * In the offload CNV Burst path the GP pin is supplied by the trigger
+ * consumer via #trigger-source-cells; gpio_setup is called from
+ * ad4691_offload_trigger_request() instead. For the non-offload path
+ * derive the pin from the first interrupt-names entry (e.g. "gp0").
+ */
+ if (device_property_present(dev, "#trigger-source-cells"))
+ return 0;
+
ret = device_property_read_string_array(dev, "interrupt-names",
&irq_name, 1);
if (ret < 0)
@@ -1158,6 +1492,56 @@ static int ad4691_setup_triggered_buffer(struct iio_dev *indio_dev,
&ad4691_manual_buffer_setup_ops);
}
+static int ad4691_setup_offload(struct iio_dev *indio_dev,
+ struct ad4691_state *st)
+{
+ struct device *dev = regmap_get_device(st->regmap);
+ struct dma_chan *rx_dma;
+ int ret;
+
+ if (st->manual_mode) {
+ st->offload_trigger =
+ devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "Failed to get periodic offload trigger\n");
+
+ st->offload_trigger_hz = st->info->max_rate;
+ } else {
+ struct spi_offload_trigger_info trigger_info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &ad4691_offload_trigger_ops,
+ .priv = st,
+ };
+
+ ret = devm_spi_offload_trigger_register(dev, &trigger_info);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register offload trigger\n");
+
+ st->offload_trigger =
+ devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_DATA_READY);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "Failed to get DATA_READY offload trigger\n");
+ }
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, st->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "Failed to get offload RX DMA channel\n");
+
+ if (st->manual_mode)
+ indio_dev->setup_ops = &ad4691_manual_offload_buffer_setup_ops;
+ else
+ indio_dev->setup_ops = &ad4691_cnv_burst_offload_buffer_setup_ops;
+
+ return devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev, rx_dma,
+ IIO_BUFFER_DIRECTION_IN);
+}
+
static int ad4691_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -1193,14 +1577,27 @@ static int ad4691_probe(struct spi_device *spi)
if (ret)
return ret;
+ st->offload = devm_spi_offload_get(dev, spi, &ad4691_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret == -ENODEV)
+ st->offload = NULL;
+ else if (ret)
+ return dev_err_probe(dev, ret, "Failed to get SPI offload\n");
+
indio_dev->name = st->info->name;
indio_dev->info = &ad4691_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->info->channels;
+ if (st->manual_mode)
+ indio_dev->channels = st->info->manual_channels;
+ else
+ indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
- ret = ad4691_setup_triggered_buffer(indio_dev, st);
+ if (st->offload)
+ ret = ad4691_setup_offload(indio_dev, st);
+ else
+ ret = ad4691_setup_triggered_buffer(indio_dev, st);
if (ret)
return ret;
@@ -1238,3 +1635,4 @@ module_spi_driver(ad4691_driver);
MODULE_AUTHOR("Radu Sabau <radu.sabau@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD4691 Family ADC Driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
--
2.43.0
Hi Radu, kernel test robot noticed the following build errors: [auto build test ERROR on 11439c4635edd669ae435eec308f4ab8a0804808] url: https://github.com/intel-lab-lkp/linux/commits/Radu-Sabau-via-B4-Relay/dt-bindings-iio-adc-add-AD4691-family/20260321-120718 base: 11439c4635edd669ae435eec308f4ab8a0804808 patch link: https://lore.kernel.org/r/20260320-ad4692-multichannel-sar-adc-driver-v4-4-052c1050507a%40analog.com patch subject: [PATCH v4 4/4] iio: adc: ad4691: add SPI offload support config: openrisc-randconfig-r063-20260325 (https://download.01.org/0day-ci/archive/20260325/202603251904.qcAGC4cf-lkp@intel.com/config) compiler: or1k-linux-gcc (GCC) 14.3.0 reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260325/202603251904.qcAGC4cf-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202603251904.qcAGC4cf-lkp@intel.com/ All errors (new ones prefixed by >>, old ones prefixed by <<): >> ERROR: modpost: module ad4691 uses symbol devm_iio_dmaengine_buffer_setup_with_handle from namespace IIO_DMAENGINE_BUFFER, but does not import it. -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
Hi Radu, kernel test robot noticed the following build warnings: [auto build test WARNING on 11439c4635edd669ae435eec308f4ab8a0804808] url: https://github.com/intel-lab-lkp/linux/commits/Radu-Sabau-via-B4-Relay/dt-bindings-iio-adc-add-AD4691-family/20260321-120718 base: 11439c4635edd669ae435eec308f4ab8a0804808 patch link: https://lore.kernel.org/r/20260320-ad4692-multichannel-sar-adc-driver-v4-4-052c1050507a%40analog.com patch subject: [PATCH v4 4/4] iio: adc: ad4691: add SPI offload support config: hexagon-allmodconfig (https://download.01.org/0day-ci/archive/20260323/202603232017.8IO2whBG-lkp@intel.com/config) compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18) reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260323/202603232017.8IO2whBG-lkp@intel.com/reproduce) If you fix the issue in a separate patch/commit (i.e. not just a new version of the same patch/commit), kindly add following tags | Reported-by: kernel test robot <lkp@intel.com> | Closes: https://lore.kernel.org/oe-kbuild-all/202603232017.8IO2whBG-lkp@intel.com/ All warnings (new ones prefixed by >>, old ones prefixed by <<): >> WARNING: modpost: module ad4691 uses symbol devm_iio_dmaengine_buffer_setup_with_handle from namespace IIO_DMAENGINE_BUFFER, but does not import it. -- 0-DAY CI Kernel Test Service https://github.com/intel/lkp-tests/wiki
Hi Radu,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 11439c4635edd669ae435eec308f4ab8a0804808]
url: https://github.com/intel-lab-lkp/linux/commits/Radu-Sabau-via-B4-Relay/dt-bindings-iio-adc-add-AD4691-family/20260321-120718
base: 11439c4635edd669ae435eec308f4ab8a0804808
patch link: https://lore.kernel.org/r/20260320-ad4692-multichannel-sar-adc-driver-v4-4-052c1050507a%40analog.com
patch subject: [PATCH v4 4/4] iio: adc: ad4691: add SPI offload support
config: nios2-randconfig-r122-20260322 (https://download.01.org/0day-ci/archive/20260322/202603221011.rzczUvUN-lkp@intel.com/config)
compiler: nios2-linux-gcc (GCC) 11.5.0
sparse: v0.6.5-rc1
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260322/202603221011.rzczUvUN-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202603221011.rzczUvUN-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
>> drivers/iio/adc/ad4691.c:954:39: sparse: sparse: incorrect type in assignment (different base types) @@ expected unsigned int @@ got restricted __be32 [usertype] @@
drivers/iio/adc/ad4691.c:954:39: sparse: expected unsigned int
drivers/iio/adc/ad4691.c:954:39: sparse: got restricted __be32 [usertype]
drivers/iio/adc/ad4691.c:970:31: sparse: sparse: incorrect type in assignment (different base types) @@ expected unsigned int @@ got restricted __be32 [usertype] @@
drivers/iio/adc/ad4691.c:970:31: sparse: expected unsigned int
drivers/iio/adc/ad4691.c:970:31: sparse: got restricted __be32 [usertype]
drivers/iio/adc/ad4691.c:1059:39: sparse: sparse: incorrect type in assignment (different base types) @@ expected unsigned int @@ got restricted __be32 [usertype] @@
drivers/iio/adc/ad4691.c:1059:39: sparse: expected unsigned int
drivers/iio/adc/ad4691.c:1059:39: sparse: got restricted __be32 [usertype]
>> drivers/iio/adc/ad4691.c:1072:30: sparse: sparse: incorrect type in assignment (different base types) @@ expected unsigned int [usertype] offload_tx_reset @@ got restricted __be32 [usertype] @@
drivers/iio/adc/ad4691.c:1072:30: sparse: expected unsigned int [usertype] offload_tx_reset
drivers/iio/adc/ad4691.c:1072:30: sparse: got restricted __be32 [usertype]
vim +954 drivers/iio/adc/ad4691.c
927
928 static int ad4691_manual_offload_buffer_postenable(struct iio_dev *indio_dev)
929 {
930 struct ad4691_state *st = iio_priv(indio_dev);
931 struct device *dev = regmap_get_device(st->regmap);
932 struct spi_device *spi = to_spi_device(dev);
933 struct spi_offload_trigger_config config = {
934 .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
935 };
936 unsigned int bit, k;
937 int ret;
938
939 ret = ad4691_enter_conversion_mode(st);
940 if (ret)
941 return ret;
942
943 memset(st->offload_xfer, 0, sizeof(st->offload_xfer));
944
945 /*
946 * N+1 transfers for N channels. Each CS-low period triggers
947 * a conversion AND returns the previous result (pipelined).
948 * TX: [AD4691_ADC_CHAN(n), 0x00, 0x00, 0x00]
949 * RX: [data_hi, data_lo, 0x00, 0x00] (shift=16)
950 * Transfer 0 RX is garbage; transfers 1..N carry real data.
951 */
952 k = 0;
953 iio_for_each_active_channel(indio_dev, bit) {
> 954 st->offload_tx_cmd[k] =
955 cpu_to_be32(FIELD_PREP(AD4691_MSG_ADDR_HI,
956 AD4691_ADC_CHAN(bit)));
957 st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
958 st->offload_xfer[k].len = sizeof(u32);
959 st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
960 st->offload_xfer[k].cs_change = 1;
961 st->offload_xfer[k].cs_change_delay.value = AD4691_CNV_HIGH_TIME_NS;
962 st->offload_xfer[k].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
963 /* First transfer RX is garbage — skip it. */
964 if (k > 0)
965 st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
966 k++;
967 }
968
969 /* Final NOOP to flush pipeline and capture last channel. */
970 st->offload_tx_cmd[k] =
971 cpu_to_be32(FIELD_PREP(AD4691_MSG_ADDR_HI, AD4691_NOOP));
972 st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
973 st->offload_xfer[k].len = sizeof(u32);
974 st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
975 st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
976 k++;
977
978 spi_message_init_with_transfers(&st->offload_msg, st->offload_xfer, k);
979 st->offload_msg.offload = st->offload;
980
981 ret = spi_optimize_message(spi, &st->offload_msg);
982 if (ret)
983 goto err_exit_conversion;
984
985 config.periodic.frequency_hz = st->offload_trigger_hz;
986 ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
987 if (ret)
988 goto err_unoptimize;
989
990 return 0;
991
992 err_unoptimize:
993 spi_unoptimize_message(&st->offload_msg);
994 err_exit_conversion:
995 ad4691_exit_conversion_mode(st);
996 return ret;
997 }
998
999 static int ad4691_manual_offload_buffer_predisable(struct iio_dev *indio_dev)
1000 {
1001 struct ad4691_state *st = iio_priv(indio_dev);
1002
1003 spi_offload_trigger_disable(st->offload, st->offload_trigger);
1004 spi_unoptimize_message(&st->offload_msg);
1005
1006 return ad4691_exit_conversion_mode(st);
1007 }
1008
1009 static const struct iio_buffer_setup_ops ad4691_manual_offload_buffer_setup_ops = {
1010 .postenable = &ad4691_manual_offload_buffer_postenable,
1011 .predisable = &ad4691_manual_offload_buffer_predisable,
1012 };
1013
1014 static int ad4691_cnv_burst_offload_buffer_postenable(struct iio_dev *indio_dev)
1015 {
1016 struct ad4691_state *st = iio_priv(indio_dev);
1017 struct device *dev = regmap_get_device(st->regmap);
1018 struct spi_device *spi = to_spi_device(dev);
1019 struct spi_offload_trigger_config config = {
1020 .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
1021 };
1022 unsigned int n_active = hweight_long(*indio_dev->active_scan_mask);
1023 unsigned int bit, k;
1024 int ret;
1025
1026 ret = regmap_write(st->regmap, AD4691_ACC_MASK_REG,
1027 (u16)~(*indio_dev->active_scan_mask));
1028 if (ret)
1029 return ret;
1030
1031 ret = regmap_write(st->regmap, AD4691_STD_SEQ_CONFIG,
1032 *indio_dev->active_scan_mask);
1033 if (ret)
1034 return ret;
1035
1036 iio_for_each_active_channel(indio_dev, bit) {
1037 ret = regmap_write(st->regmap, AD4691_ACC_COUNT_LIMIT(bit),
1038 AD4691_ACC_COUNT_VAL);
1039 if (ret)
1040 return ret;
1041 }
1042
1043 ret = ad4691_enter_conversion_mode(st);
1044 if (ret)
1045 return ret;
1046
1047 memset(st->offload_xfer, 0, sizeof(st->offload_xfer));
1048
1049 /*
1050 * N transfers to read N AVG_IN registers plus one state-reset
1051 * transfer (no RX) to re-arm DATA_READY.
1052 * TX: [reg_hi | 0x80, reg_lo, 0x00, 0x00]
1053 * RX: [0x00, 0x00, data_hi, data_lo] (shift=0)
1054 */
1055 k = 0;
1056 iio_for_each_active_channel(indio_dev, bit) {
1057 unsigned int reg = AD4691_AVG_IN(bit);
1058
1059 st->offload_tx_cmd[k] =
1060 cpu_to_be32(((reg >> 8 | 0x80) << 24) |
1061 ((reg & 0xFF) << 16));
1062 st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
1063 st->offload_xfer[k].len = sizeof(u32);
1064 st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
1065 st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
1066 if (k < n_active - 1)
1067 st->offload_xfer[k].cs_change = 1;
1068 k++;
1069 }
1070
1071 /* State reset to re-arm DATA_READY for the next scan. */
> 1072 st->offload_tx_reset =
1073 cpu_to_be32(((AD4691_STATE_RESET_REG >> 8) << 24) |
1074 ((AD4691_STATE_RESET_REG & 0xFF) << 16) |
1075 (AD4691_STATE_RESET_ALL << 8));
1076 st->offload_xfer[k].tx_buf = &st->offload_tx_reset;
1077 st->offload_xfer[k].len = sizeof(u32);
1078 st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
1079 k++;
1080
1081 spi_message_init_with_transfers(&st->offload_msg, st->offload_xfer, k);
1082 st->offload_msg.offload = st->offload;
1083
1084 ret = spi_optimize_message(spi, &st->offload_msg);
1085 if (ret)
1086 goto err_exit_conversion;
1087
1088 ret = ad4691_sampling_enable(st, true);
1089 if (ret)
1090 goto err_unoptimize;
1091
1092 ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
1093 if (ret)
1094 goto err_sampling_disable;
1095
1096 return 0;
1097
1098 err_sampling_disable:
1099 ad4691_sampling_enable(st, false);
1100 err_unoptimize:
1101 spi_unoptimize_message(&st->offload_msg);
1102 err_exit_conversion:
1103 ad4691_exit_conversion_mode(st);
1104 return ret;
1105 }
1106
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On Fri, 20 Mar 2026 13:03:58 +0200
Radu Sabau via B4 Relay <devnull+radu.sabau.analog.com@kernel.org> wrote:
> From: Radu Sabau <radu.sabau@analog.com>
>
> Add SPI offload support to enable DMA-based, CPU-independent data
> acquisition using the SPI Engine offload framework.
>
> When an SPI offload is available (devm_spi_offload_get() succeeds),
> the driver registers a DMA engine IIO buffer and uses dedicated buffer
> setup operations. If no offload is available the existing software
> triggered buffer path is used unchanged.
>
> Both CNV Burst Mode and Manual Mode support offload, but use different
> trigger mechanisms:
>
> CNV Burst Mode: the SPI Engine is triggered by the ADC's DATA_READY
> signal on the GP pin specified by the trigger-source consumer reference
> in the device tree (one cell = GP pin number 0-3). For this mode the
> driver acts as both an SPI offload consumer (DMA RX stream, message
> optimization) and a trigger source provider: it registers the
> GP/DATA_READY output via devm_spi_offload_trigger_register() so the
> offload framework can match the '#trigger-source-cells' phandle and
> automatically fire the SPI Engine DMA transfer at end-of-conversion.
>
> Manual Mode: the SPI Engine is triggered by a periodic trigger at
> the configured sampling frequency. The pre-built SPI message uses
> the pipelined CNV-on-CS protocol: N+1 4-byte transfers are issued
> for N active channels (the first result is discarded as garbage from
> the pipeline flush) and the remaining N results are captured by DMA.
>
> All offload transfers use 32-bit frames (bits_per_word=32, len=4) for
> DMA word alignment. This patch promotes the channel scan_type from
> storagebits=16 (triggered-buffer path) to storagebits=32 to match the
> DMA word size; the triggered-buffer paths are updated to the same layout
> for consistency.
That's quite a large cost for the kfifo sizing, particularly if timestamps
are enabled. I think I'd prefer we kept the existing paths using 16 bit data
storage for each channel.
> CNV Burst Mode channel data arrives in the lower 16
> bits of the 32-bit word (shift=0); Manual Mode data arrives in the upper
> 16 bits (shift=16), matching the 4-byte SPI transfer layout
> [data_hi, data_lo, 0, 0]. A separate ad4691_manual_channels[] array
> encodes the shift=16 scan type for manual mode.
That's odd - but fair enough if that's what the IP ends up doing.
>
> Kconfig gains a dependency on IIO_BUFFER_DMAENGINE.
>
> Signed-off-by: Radu Sabau <radu.sabau@analog.com>
Various other minor comments inline.
> -#define AD4691_CHANNEL(ch) \
> +/*
> + * 16-bit ADC data is stored in 32-bit slots to match the SPI offload DMA
> + * word size (32 bits per transfer). The shift reflects the data position
As mentioned elsewhere, I don't think we care about matching the offload
layout. Lots of existing drivers don't and if we can we want to minimize
wasted space whilst still keeping the data naturally aligned to make accesses easy.
> + * within the 32-bit word:
> + * CNV_BURST: RX = [dummy, dummy, data_hi, data_lo] -> shift = 0
> + * MANUAL: RX = [data_hi, data_lo, dummy, dummy] -> shift = 16
> + * The triggered-buffer paths store data in the same position for consistency.
> + * Do not "fix" storagebits to 16.
> + */
> +#define AD4691_CHANNEL(ch, _shift) \
> { \
> .type = IIO_VOLTAGE, \
> .indexed = 1, \
> @@ -118,40 +140,72 @@ struct ad4691_chip_info {
> .scan_type = { \
> .sign = 'u', \
> .realbits = 16, \
> - .storagebits = 16, \
> - .shift = 0, \
> + .storagebits = 32, \
> + .shift = _shift, \
> }, \
> }
> @@ -227,9 +285,9 @@ struct ad4691_state {
> */
> struct mutex lock;
> /*
> - * Per-buffer-enabl ree lifetimesources:
> - * Manual Mode - a pre-built SPI message that clocks out N+1
> - * transfers in one go.
> + * Per-buffer-enable lifetime resources (triggered-buffer paths):
> + * Manual Mode - a pre-built SPI message that clocks out N+1
> + * transfers in one go.
> * CNV Burst Mode - a pre-built SPI message that clocks out 2*N
> * transfers in one go.
> */
> @@ -238,9 +296,19 @@ struct ad4691_state {
> struct spi_transfer *scan_xfers;
> __be16 *scan_tx;
> __be16 *scan_rx;
> - /* Scan buffer: one slot per channel (u16) plus timestamp */
> + /* SPI offload DMA path resources */
> + struct spi_offload *offload;
> + /* SPI offload trigger - periodic (MANUAL) or DATA_READY (CNV_BURST) */
> + struct spi_offload_trigger *offload_trigger;
> + u64 offload_trigger_hz;
> + struct spi_message offload_msg;
> + /* Max 16 channel xfers + 1 state-reset or NOOP */
> + struct spi_transfer offload_xfer[17];
> + u32 offload_tx_cmd[17];
Andy already commented on this being large. Allocating separately
probably makes sense.
> + u32 offload_tx_reset;
> + /* Scan buffer: one slot per channel (u32) plus timestamp */
> struct {
> - u16 vals[16];
> + u32 vals[16];
> s64 ts __aligned(8);
> } scan __aligned(IIO_DMA_MINALIGN);
> };
> +static int ad4691_cnv_burst_offload_buffer_postenable(struct iio_dev *indio_dev)
> +{
> + struct ad4691_state *st = iio_priv(indio_dev);
> + struct device *dev = regmap_get_device(st->regmap);
> + struct spi_device *spi = to_spi_device(dev);
> + struct spi_offload_trigger_config config = {
> + .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
> + };
> + unsigned int n_active = hweight_long(*indio_dev->active_scan_mask);
> + unsigned int bit, k;
> + int ret;
> +
> + ret = regmap_write(st->regmap, AD4691_ACC_MASK_REG,
> + (u16)~(*indio_dev->active_scan_mask));
> + if (ret)
> + return ret;
> +
> + ret = regmap_write(st->regmap, AD4691_STD_SEQ_CONFIG,
> + *indio_dev->active_scan_mask);
> + if (ret)
> + return ret;
> +
> + iio_for_each_active_channel(indio_dev, bit) {
> + ret = regmap_write(st->regmap, AD4691_ACC_COUNT_LIMIT(bit),
> + AD4691_ACC_COUNT_VAL);
> + if (ret)
> + return ret;
> + }
> +
> + ret = ad4691_enter_conversion_mode(st);
> + if (ret)
> + return ret;
> +
> + memset(st->offload_xfer, 0, sizeof(st->offload_xfer));
> +
> + /*
> + * N transfers to read N AVG_IN registers plus one state-reset
> + * transfer (no RX) to re-arm DATA_READY.
> + * TX: [reg_hi | 0x80, reg_lo, 0x00, 0x00]
> + * RX: [0x00, 0x00, data_hi, data_lo] (shift=0)
> + */
> + k = 0;
> + iio_for_each_active_channel(indio_dev, bit) {
> + unsigned int reg = AD4691_AVG_IN(bit);
> +
> + st->offload_tx_cmd[k] =
> + cpu_to_be32(((reg >> 8 | 0x80) << 24) |
> + ((reg & 0xFF) << 16));
This is an odd looking construct. Maybe it's worth casting offload_tx_cmd[k] to
a u8 * and just filling the two bytes in directly.
> + st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
> + st->offload_xfer[k].len = sizeof(u32);
> + st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
> + st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
> + if (k < n_active - 1)
> + st->offload_xfer[k].cs_change = 1;
> + k++;
> + }
> +
> + /* State reset to re-arm DATA_READY for the next scan. */
> + st->offload_tx_reset =
> + cpu_to_be32(((AD4691_STATE_RESET_REG >> 8) << 24) |
> + ((AD4691_STATE_RESET_REG & 0xFF) << 16) |
> + (AD4691_STATE_RESET_ALL << 8));
Similar to above. Feels like we should be manipulating this as a u8[4]
> + st->offload_xfer[k].tx_buf = &st->offload_tx_reset;
> + st->offload_xfer[k].len = sizeof(u32);
> + st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
> + k++;
> +
> + spi_message_init_with_transfers(&st->offload_msg, st->offload_xfer, k);
> + st->offload_msg.offload = st->offload;
> +
> + ret = spi_optimize_message(spi, &st->offload_msg);
> + if (ret)
> + goto err_exit_conversion;
> +
> + ret = ad4691_sampling_enable(st, true);
> + if (ret)
> + goto err_unoptimize;
> +
> + ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
> + if (ret)
> + goto err_sampling_disable;
> +
> + return 0;
> +
> +err_sampling_disable:
> + ad4691_sampling_enable(st, false);
> +err_unoptimize:
> + spi_unoptimize_message(&st->offload_msg);
> +err_exit_conversion:
> + ad4691_exit_conversion_mode(st);
> + return ret;
> +}
On Fri, Mar 20, 2026 at 01:03:58PM +0200, Radu Sabau via B4 Relay wrote:
> Add SPI offload support to enable DMA-based, CPU-independent data
> acquisition using the SPI Engine offload framework.
>
> When an SPI offload is available (devm_spi_offload_get() succeeds),
> the driver registers a DMA engine IIO buffer and uses dedicated buffer
> setup operations. If no offload is available the existing software
> triggered buffer path is used unchanged.
>
> Both CNV Burst Mode and Manual Mode support offload, but use different
> trigger mechanisms:
>
> CNV Burst Mode: the SPI Engine is triggered by the ADC's DATA_READY
> signal on the GP pin specified by the trigger-source consumer reference
> in the device tree (one cell = GP pin number 0-3). For this mode the
> driver acts as both an SPI offload consumer (DMA RX stream, message
> optimization) and a trigger source provider: it registers the
> GP/DATA_READY output via devm_spi_offload_trigger_register() so the
> offload framework can match the '#trigger-source-cells' phandle and
> automatically fire the SPI Engine DMA transfer at end-of-conversion.
>
> Manual Mode: the SPI Engine is triggered by a periodic trigger at
> the configured sampling frequency. The pre-built SPI message uses
> the pipelined CNV-on-CS protocol: N+1 4-byte transfers are issued
> for N active channels (the first result is discarded as garbage from
> the pipeline flush) and the remaining N results are captured by DMA.
>
> All offload transfers use 32-bit frames (bits_per_word=32, len=4) for
> DMA word alignment. This patch promotes the channel scan_type from
> storagebits=16 (triggered-buffer path) to storagebits=32 to match the
> DMA word size; the triggered-buffer paths are updated to the same layout
> for consistency. CNV Burst Mode channel data arrives in the lower 16
> bits of the 32-bit word (shift=0); Manual Mode data arrives in the upper
> 16 bits (shift=16), matching the 4-byte SPI transfer layout
> [data_hi, data_lo, 0, 0]. A separate ad4691_manual_channels[] array
> encodes the shift=16 scan type for manual mode.
>
> Kconfig gains a dependency on IIO_BUFFER_DMAENGINE.
...
> + struct spi_offload *offload;
> + /* SPI offload trigger - periodic (MANUAL) or DATA_READY (CNV_BURST) */
> + struct spi_offload_trigger *offload_trigger;
> + u64 offload_trigger_hz;
> + struct spi_message offload_msg;
> + /* Max 16 channel xfers + 1 state-reset or NOOP */
> + struct spi_transfer offload_xfer[17];
> + u32 offload_tx_cmd[17];
> + u32 offload_tx_reset;
Ouch! Can you guarantee these kilobytes (isn't it?) of memory will be used in
majority of the cases? When I got comment on replacing a single u8 by unsigned
long in one well used data structure in the kernel I was laughing, but this
single driver may beat the record of memory waste on the embedded platforms.
Perhaps having a separate structure and allocate it separately when we sure
the offload is supported?
Cc'ed to Wolfram.
...
> + /* Scan buffer: one slot per channel (u32) plus timestamp */
> struct {
> - u16 vals[16];
> + u32 vals[16];
> s64 ts __aligned(8);
This might break the existing cases or make code ugly and not actually
compatible with u32 layout.
> } scan __aligned(IIO_DMA_MINALIGN);
...
> +static int ad4691_cnv_burst_offload_buffer_postenable(struct iio_dev *indio_dev)
> +{
> + struct ad4691_state *st = iio_priv(indio_dev);
> + struct device *dev = regmap_get_device(st->regmap);
> + struct spi_device *spi = to_spi_device(dev);
> + struct spi_offload_trigger_config config = {
> + .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
> + };
> + unsigned int n_active = hweight_long(*indio_dev->active_scan_mask);
Should be bitmap_weight() with properly given amount of bits.
> + unsigned int bit, k;
> + int ret;
> +
> + ret = regmap_write(st->regmap, AD4691_ACC_MASK_REG,
> + (u16)~(*indio_dev->active_scan_mask));
This is not how we work with bitmaps. Use bitmap_read().
> + if (ret)
> + return ret;
> +
> + ret = regmap_write(st->regmap, AD4691_STD_SEQ_CONFIG,
> + *indio_dev->active_scan_mask);
Ditto.
> + if (ret)
> + return ret;
> +
> + iio_for_each_active_channel(indio_dev, bit) {
> + ret = regmap_write(st->regmap, AD4691_ACC_COUNT_LIMIT(bit),
> + AD4691_ACC_COUNT_VAL);
> + if (ret)
> + return ret;
> + }
> +
> + ret = ad4691_enter_conversion_mode(st);
> + if (ret)
> + return ret;
> +
> + memset(st->offload_xfer, 0, sizeof(st->offload_xfer));
> +
> + /*
> + * N transfers to read N AVG_IN registers plus one state-reset
> + * transfer (no RX) to re-arm DATA_READY.
> + * TX: [reg_hi | 0x80, reg_lo, 0x00, 0x00]
> + * RX: [0x00, 0x00, data_hi, data_lo] (shift=0)
> + */
> + k = 0;
> + iio_for_each_active_channel(indio_dev, bit) {
> + unsigned int reg = AD4691_AVG_IN(bit);
> +
> + st->offload_tx_cmd[k] =
> + cpu_to_be32(((reg >> 8 | 0x80) << 24) |
> + ((reg & 0xFF) << 16));
Isn't this is just a cpu_to_be16(0x8000 | reg) ?
> + st->offload_xfer[k].tx_buf = &st->offload_tx_cmd[k];
> + st->offload_xfer[k].len = sizeof(u32);
> + st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
> + st->offload_xfer[k].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
> + if (k < n_active - 1)
> + st->offload_xfer[k].cs_change = 1;
> + k++;
> + }
> +
> + /* State reset to re-arm DATA_READY for the next scan. */
> + st->offload_tx_reset =
> + cpu_to_be32(((AD4691_STATE_RESET_REG >> 8) << 24) |
> + ((AD4691_STATE_RESET_REG & 0xFF) << 16) |
> + (AD4691_STATE_RESET_ALL << 8));
In similar way
cpu_to_be32((AD4691_STATE_RESET_REG << 16) |
(AD4691_STATE_RESET_ALL << 8));
> + st->offload_xfer[k].tx_buf = &st->offload_tx_reset;
> + st->offload_xfer[k].len = sizeof(u32);
> + st->offload_xfer[k].bits_per_word = AD4691_OFFLOAD_BITS_PER_WORD;
> + k++;
> +
> + spi_message_init_with_transfers(&st->offload_msg, st->offload_xfer, k);
> + st->offload_msg.offload = st->offload;
> +
> + ret = spi_optimize_message(spi, &st->offload_msg);
> + if (ret)
> + goto err_exit_conversion;
> +
> + ret = ad4691_sampling_enable(st, true);
> + if (ret)
> + goto err_unoptimize;
> +
> + ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
> + if (ret)
> + goto err_sampling_disable;
> +
> + return 0;
> +
> +err_sampling_disable:
> + ad4691_sampling_enable(st, false);
> +err_unoptimize:
> + spi_unoptimize_message(&st->offload_msg);
> +err_exit_conversion:
> + ad4691_exit_conversion_mode(st);
> + return ret;
> +}
--
With Best Regards,
Andy Shevchenko
© 2016 - 2026 Red Hat, Inc.