The AXI DMA driver incorrectly assumes the whole transfer has completed
upon IRQ reception, which is particularly problematic when IRQ
coalescing is active: a single interrupt may cover only some of the
in-flight descriptors. Fix this by updating the tail pointer dynamically
as new descriptors are queued.
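Conceptually, once the idle check is gone, submitting work to a busy
scatter-gather channel only needs the hardware tail pointer advanced. A
minimal sketch of that path, simplified to the non-cyclic SG case and
using the driver's existing helpers (the actual write sits at the end
of xilinx_dma_start_transfer(), outside the hunks below):

	/*
	 * Sketch, not part of the patch: advancing TAILDESC lets a
	 * running engine keep fetching descriptors up to the new tail
	 * without being stopped and restarted.
	 */
	if (chan->has_sg)
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);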
Remove the existing idle-state check at the beginning of
xilinx_dma_start_transfer(), as it blocks valid transfer initiation on
busy channels with queued descriptors.
Additionally, refactor xilinx_dma_start_transfer() to consolidate the
coalesce and delay configuration into a single DMACR write and to start
the channel only when it is idle.
Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
Fixes: Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")
---
drivers/dma/xilinx/xilinx_dma.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a34d8f0ceed8..187749b7b8a6 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1548,9 +1548,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (list_empty(&chan->pending_list))
return;
- if (!chan->idle)
- return;
-
head_desc = list_first_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
tail_desc = list_last_entry(&chan->pending_list,
@@ -1558,23 +1555,24 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_axidma_tx_segment, node);
+ if (chan->has_sg && list_empty(&chan->active_list))
+ xilinx_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
reg &= ~XILINX_DMA_CR_COALESCE_MAX;
reg |= chan->desc_pendingcount <<
XILINX_DMA_CR_COALESCE_SHIFT;
- dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg)
- xilinx_write(chan, XILINX_DMA_REG_CURDESC,
- head_desc->async_tx.phys);
reg &= ~XILINX_DMA_CR_DELAY_MAX;
reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
- xilinx_dma_start(chan);
+ if (chan->idle)
+ xilinx_dma_start(chan);
if (chan->err)
return;
@@ -1914,8 +1912,10 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
- chan->idle = true;
- chan->start_transfer(chan);
+ if (list_empty(&chan->active_list)) {
+ chan->idle = true;
+ chan->start_transfer(chan);
+ }
spin_unlock(&chan->lock);
}
--
2.25.1
On 2025-07-10 at 10:12:27, Suraj Gupta (suraj.gupta2@amd.com) wrote:
> The AXI DMA driver incorrectly assumes the whole transfer has completed
> upon IRQ reception, which is particularly problematic when IRQ
> coalescing is active: a single interrupt may cover only some of the
> in-flight descriptors. Fix this by updating the tail pointer
> dynamically as new descriptors are queued.
[...]

Your series looks like net-next material and this one is fixing an
existing bug. Send this one patch separately to net. Also include net
or net-next in the subject.

Thanks,
Sundeep
On Thu Jul 10, 2025 at 12:12 PM CEST, Suraj Gupta wrote:
> The AXI DMA driver incorrectly assumes the whole transfer has completed
> upon IRQ reception, which is particularly problematic when IRQ
> coalescing is active: a single interrupt may cover only some of the
> in-flight descriptors. Fix this by updating the tail pointer
> dynamically as new descriptors are queued.
[...]

This fixes an issue I recently ran into which prevented starting
consecutive transfers. Thanks and:

Tested-by: Folker Schwesinger <dev@folker-schwesinger.de>
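For reference, a minimal client-side pattern that exercises the
"consecutive transfers" case described above. This is a hypothetical
sketch, not code from the patch or the driver: the function name,
buffer handles, and length are placeholders, and only standard
dmaengine API calls are used.

	#include <linux/dmaengine.h>

	/*
	 * Hypothetical test pattern: submit two slave transfers back to
	 * back. Before the fix, the second submission could stall
	 * because xilinx_dma_start_transfer() bailed out on a busy
	 * channel instead of advancing the tail pointer.
	 */
	static int queue_two_transfers(struct dma_chan *chan,
				       dma_addr_t buf[2], size_t len)
	{
		struct dma_async_tx_descriptor *desc;
		dma_cookie_t cookie;
		int i;

		for (i = 0; i < 2; i++) {
			desc = dmaengine_prep_slave_single(chan, buf[i], len,
							   DMA_MEM_TO_DEV,
							   DMA_PREP_INTERRUPT);
			if (!desc)
				return -ENOMEM;
			cookie = dmaengine_submit(desc);
			if (dma_submit_error(cookie))
				return -EIO;
		}
		/* Kick the engine once; both descriptors should drain. */
		dma_async_issue_pending(chan);
		return 0;
	}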
On Thu, Jul 10, 2025 at 03:42:27PM +0530, Suraj Gupta wrote:
[...]
> Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
> Fixes: Fixes: c0bba3a99f07 ("dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine")

Hi,

This is not a proper review, and there is probably no need to repost
just because of it. But:

s/Fixes: Fixes: /Fixes: /

...