Move the channel-enable logic into a new helper function,
dw_(edma|hdma)_v0_core_ch_enable(), in preparation for supporting dynamic
link entry additions.

No functional changes.

Signed-off-by: Frank Li <Frank.Li@nxp.com>
---
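For reviewers, a minimal sketch (illustration only, kept below the '---'
so it stays out of the commit) of how the factored-out helper is expected
to be reused once dynamic link entry additions land. The function name
dw_edma_v0_core_ll_append() and its 'restart' parameter are hypothetical
and not part of this series:

static void dw_edma_v0_core_ll_append(struct dw_edma_chunk *chunk,
				      bool restart)
{
	struct dw_edma_chan *chan = chunk->chan;

	/* Write the new link entries for this chunk */
	dw_edma_v0_core_write_chunk(chunk);

	/* Re-arm the channel via the shared enable path if it went idle */
	if (restart)
		dw_edma_v0_core_ch_enable(chan);
}

The hdma side would mirror this with dw_hdma_v0_core_ch_enable().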
drivers/dma/dw-edma/dw-edma-v0-core.c | 128 +++++++++++++++++-----------------
drivers/dma/dw-edma/dw-hdma-v0-core.c | 49 +++++++------
2 files changed, 91 insertions(+), 86 deletions(-)

diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 79265684613df4f4a30d6108d696b95a2934dffe..cd99bb34452d19eb9fd04b237609545ab1092eaa 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -318,6 +318,67 @@ static void dw_edma_v0_write_ll_link(struct dw_edma_chan *chan,
}
}
+static void dw_edma_v0_core_ch_enable(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+ unsigned long flags;
+ u32 tmp;
+
+ /* Enable engine */
+ SET_RW_32(dw, chan->dir, engine_en, BIT(0));
+ if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
+ switch (chan->id) {
+ case 0:
+ SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en, BIT(0));
+ break;
+ case 1:
+ SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en, BIT(0));
+ break;
+ case 2:
+ SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en, BIT(0));
+ break;
+ case 3:
+ SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en, BIT(0));
+ break;
+ case 4:
+ SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en, BIT(0));
+ break;
+ case 5:
+ SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en, BIT(0));
+ break;
+ case 6:
+ SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en, BIT(0));
+ break;
+ case 7:
+ SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en, BIT(0));
+ break;
+ }
+ }
+ /* Interrupt unmask - done, abort */
+ raw_spin_lock_irqsave(&dw->lock, flags);
+
+ tmp = GET_RW_32(dw, chan->dir, int_mask);
+ tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
+ tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
+ SET_RW_32(dw, chan->dir, int_mask, tmp);
+ /* Linked list error */
+ tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
+ tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
+ SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
+
+ raw_spin_unlock_irqrestore(&dw->lock, flags);
+
+ /* Channel control */
+ SET_CH_32(dw, chan->dir, chan->id, ch_control1,
+ (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
+ /* Linked list */
+ /* llp is not aligned on 64bit -> keep 32bit accesses */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chan->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chan->ll_region.paddr));
+}
+
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
@@ -366,74 +427,11 @@ static void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
- unsigned long flags;
- u32 tmp;
dw_edma_v0_core_write_chunk(chunk);
- if (first) {
- /* Enable engine */
- SET_RW_32(dw, chan->dir, engine_en, BIT(0));
- if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
- switch (chan->id) {
- case 0:
- SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
- BIT(0));
- break;
- case 1:
- SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
- BIT(0));
- break;
- case 2:
- SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
- BIT(0));
- break;
- case 3:
- SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
- BIT(0));
- break;
- case 4:
- SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
- BIT(0));
- break;
- case 5:
- SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
- BIT(0));
- break;
- case 6:
- SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
- BIT(0));
- break;
- case 7:
- SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
- BIT(0));
- break;
- }
- }
- /* Interrupt unmask - done, abort */
- raw_spin_lock_irqsave(&dw->lock, flags);
-
- tmp = GET_RW_32(dw, chan->dir, int_mask);
- tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
- tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
- SET_RW_32(dw, chan->dir, int_mask, tmp);
- /* Linked list error */
- tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
- tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
- SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
-
- raw_spin_unlock_irqrestore(&dw->lock, flags);
-
- /* Channel control */
- SET_CH_32(dw, chan->dir, chan->id, ch_control1,
- (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
- /* Linked list */
- /* llp is not aligned on 64bit -> keep 32bit accesses */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chan->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chan->ll_region.paddr));
- }
+ if (first)
+ dw_edma_v0_core_ch_enable(chan);
dw_edma_v0_sync_ll_data(chan);
diff --git a/drivers/dma/dw-edma/dw-hdma-v0-core.c b/drivers/dma/dw-edma/dw-hdma-v0-core.c
index 27f79d9b97d91fdbafc4f1e1e4d099bbbddf60e2..953868ef424250c1b696b9e61b72ba9a9c7c38c9 100644
--- a/drivers/dma/dw-edma/dw-hdma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-hdma-v0-core.c
@@ -194,6 +194,31 @@ static void dw_hdma_v0_write_ll_link(struct dw_edma_chan *chan,
}
}
+static void dw_hdma_v0_core_ch_enable(struct dw_edma_chan *chan)
+{
+ struct dw_edma *dw = chan->dw;
+ u32 tmp;
+
+ /* Enable engine */
+ SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
+ /* Interrupt unmask - stop, abort */
+ tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
+ tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
+ /* Interrupt enable - stop, abort */
+ tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
+ if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
+ tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
+ SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
+ /* Channel control */
+ SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
+ /* Linked list */
+ /* llp is not aligned on 64bit -> keep 32bit accesses */
+ SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+ lower_32_bits(chan->ll_region.paddr));
+ SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+ upper_32_bits(chan->ll_region.paddr));
+}
+
static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_chan *chan = chunk->chan;
@@ -232,30 +257,12 @@ static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->dw;
- u32 tmp;
dw_hdma_v0_core_write_chunk(chunk);
- if (first) {
- /* Enable engine */
- SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
- /* Interrupt unmask - stop, abort */
- tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup);
- tmp &= ~(HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
- /* Interrupt enable - stop, abort */
- tmp |= HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
- if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
- tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
- SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
- /* Channel control */
- SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
- /* Linked list */
- /* llp is not aligned on 64bit -> keep 32bit accesses */
- SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
- lower_32_bits(chan->ll_region.paddr));
- SET_CH_32(dw, chan->dir, chan->id, llp.msb,
- upper_32_bits(chan->ll_region.paddr));
- }
+ if (first)
+ dw_hdma_v0_core_ch_enable(chan);
+
/* Set consumer cycle */
SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);
--
2.34.1