Remove the restrictive idle check in xilinx_dma_start_transfer() and
xilinx_mcdma_start_transfer() that prevented new transfers from being
queued when the channel was busy.
Additionally, only update the CURDESC register when the channel is
running in scatter-gather mode and the active list is empty, to avoid
interfering with transfers already in progress. When the active list
contains transfers, the hardware tail-pointer extension mechanism
handles chaining automatically.
Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
Co-developed-by: Srinivas Neeli <srinivas.neeli@amd.com>
Signed-off-by: Srinivas Neeli <srinivas.neeli@amd.com>
---
drivers/dma/xilinx/xilinx_dma.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 53b82ddad007..aa6589e88c5c 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1548,9 +1548,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (list_empty(&chan->pending_list))
return;
- if (!chan->idle)
- return;
-
head_desc = list_first_entry(&chan->pending_list,
struct xilinx_dma_tx_descriptor, node);
tail_desc = list_last_entry(&chan->pending_list,
@@ -1567,7 +1564,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
}
- if (chan->has_sg)
+ if (chan->has_sg && list_empty(&chan->active_list))
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
reg &= ~XILINX_DMA_CR_DELAY_MAX;
@@ -1627,9 +1624,6 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->err)
return;
- if (!chan->idle)
- return;
-
if (list_empty(&chan->pending_list))
return;
@@ -1652,8 +1646,9 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
/* Program current descriptor */
- xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
- head_desc->async_tx.phys);
+ if (chan->has_sg && list_empty(&chan->active_list))
+ xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
+ head_desc->async_tx.phys);
/* Program channel enable register */
reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
--
2.25.1
On Fri Oct 3, 2025 at 8:19 AM CEST, Suraj Gupta wrote: > Remove the restrictive idle check in xilinx_dma_start_transfer() and > xilinx_mcdma_start_transfer() that prevented new transfers from being > queued when the channel was busy. > Additionally, only update the CURDESC register when the channel is > running in scatter-gather mode and active list is empty to avoid > interfering with transfers already in progress. When the active list > contains transfers, the hardware tail pointer extension mechanism > handles chaining automatically. > > Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com> > Co-developed-by: Srinivas Neeli <srinivas.neeli@amd.com> > Signed-off-by: Srinivas Neeli <srinivas.neeli@amd.com> For the AXIDMA code paths: Tested-by: Folker Schwesinger <dev@folker-schwesinger.de> > --- > drivers/dma/xilinx/xilinx_dma.c | 13 ++++--------- > 1 file changed, 4 insertions(+), 9 deletions(-) > > diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c > index 53b82ddad007..aa6589e88c5c 100644 > --- a/drivers/dma/xilinx/xilinx_dma.c > +++ b/drivers/dma/xilinx/xilinx_dma.c > @@ -1548,9 +1548,6 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) > if (list_empty(&chan->pending_list)) > return; > > - if (!chan->idle) > - return; > - > head_desc = list_first_entry(&chan->pending_list, > struct xilinx_dma_tx_descriptor, node); > tail_desc = list_last_entry(&chan->pending_list, > @@ -1567,7 +1564,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan) > dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); > } > > - if (chan->has_sg) > + if (chan->has_sg && list_empty(&chan->active_list)) > xilinx_write(chan, XILINX_DMA_REG_CURDESC, > head_desc->async_tx.phys); > reg &= ~XILINX_DMA_CR_DELAY_MAX; > @@ -1627,9 +1624,6 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) > if (chan->err) > return; > > - if (!chan->idle) > - return; > - > if (list_empty(&chan->pending_list)) > return; > > @@ -1652,8 
+1646,9 @@ static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan) > dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg); > > /* Program current descriptor */ > - xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), > - head_desc->async_tx.phys); > + if (chan->has_sg && list_empty(&chan->active_list)) > + xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest), > + head_desc->async_tx.phys); > > /* Program channel enable register */ > reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
© 2016 - 2025 Red Hat, Inc.