On the RZ/V2H and RZ/G3E SoCs, some peripherals — mainly in the audio
subsystem — require explicit ACK signal routing through the ICU.
Extend the driver to support an optional second DMA specifier cell that
contains the ACK signal number. When present, program the ICU accordingly
during channel configuration. This maintains backward compatibility with
single-cell DMA specifiers.
Signed-off-by: John Madieu <john.madieu.xa@bp.renesas.com>
---
drivers/dma/sh/rz-dmac.c | 40 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 240c318b5753..d4a8cc95b871 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -97,6 +97,7 @@ struct rz_dmac_chan {
u32 chcfg;
u32 chctrl;
int mid_rid;
+ int dmac_ack;
struct {
u32 nxla;
@@ -124,6 +125,9 @@ struct rz_dmac_icu {
struct rz_dmac_info {
void (*icu_register_dma_req)(struct platform_device *icu_dev,
u8 dmac_index, u8 dmac_channel, u16 req_no);
+ void (*icu_register_dma_ack)(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 ack_no);
+ u16 default_dma_ack_no;
u16 default_dma_req_no;
};
@@ -362,6 +366,25 @@ static void rz_dmac_set_dma_req_no(struct rz_dmac *dmac, unsigned int index,
rz_dmac_set_dmars_register(dmac, index, req_no);
}
+/* Register DMA ACK signal @ack_no for channel @index with the ICU, on SoCs
+ * that provide an ACK registration hook; no-op otherwise.
+ */
+static void rz_dmac_set_dma_ack_no(struct rz_dmac *dmac, unsigned int index,
+				   u16 ack_no)
+{
+	if (dmac->info->icu_register_dma_ack)
+		dmac->info->icu_register_dma_ack(dmac->icu.pdev,
+						 dmac->icu.dmac_index,
+						 index, ack_no);
+}
+
+/* Restore the default ACK routing for channel @index, but only if an ACK
+ * signal (@ack_no >= 0) was previously registered for it. The ICU callback
+ * takes (icu_dev, dmac_index, dmac_channel, ack_no), so the channel index —
+ * not the ACK number — must go in the third slot.
+ */
+static void rz_dmac_reset_dma_ack_no(struct rz_dmac *dmac, unsigned int index,
+				     int ack_no)
+{
+	if (ack_no < 0)
+		return;
+
+	rz_dmac_set_dma_ack_no(dmac, index, dmac->info->default_dma_ack_no);
+}
+
static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
struct dma_chan *chan = &channel->vc.chan;
@@ -431,6 +454,7 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
+	rz_dmac_set_dma_ack_no(dmac, channel->index, channel->dmac_ack);
}
static void rz_dmac_prepare_descs_for_cyclic(struct rz_dmac_chan *channel)
@@ -485,6 +509,7 @@ static void rz_dmac_prepare_descs_for_cyclic(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
+	rz_dmac_set_dma_ack_no(dmac, channel->index, channel->dmac_ack);
}
static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
@@ -567,6 +592,9 @@ static void rz_dmac_free_chan_resources(struct dma_chan *chan)
channel->mid_rid = -EINVAL;
}
+	rz_dmac_reset_dma_ack_no(dmac, channel->index, channel->dmac_ack);
+	channel->dmac_ack = -EINVAL;
+
spin_unlock_irqrestore(&channel->vc.lock, flags);
list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
@@ -814,6 +842,7 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
dev_warn(dmac->dev, "DMA Timeout");
rz_dmac_set_dma_req_no(dmac, channel->index, dmac->info->default_dma_req_no);
+	rz_dmac_reset_dma_ack_no(dmac, channel->index, channel->dmac_ack);
}
static struct rz_lmdesc *
@@ -1164,6 +1193,10 @@ static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);
+ /* ACK signal number from optional second cell */
+ if (dma_spec->args_count == 2 && dmac->info->icu_register_dma_ack)
+ channel->dmac_ack = FIELD_GET(GENMASK(6, 0), dma_spec->args[1]);
+
return !test_and_set_bit(channel->mid_rid, dmac->modules);
}
@@ -1172,7 +1205,8 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
{
dma_cap_mask_t mask;
- if (dma_spec->args_count != 1)
+ /* Accept 1 cell (basic) or 2 cells (with ACK signal) */
+ if (dma_spec->args_count < 1 || dma_spec->args_count > 2)
return NULL;
/* Only slave DMA channels can be allocated via DT */
@@ -1200,6 +1234,7 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
channel->index = index;
channel->mid_rid = -EINVAL;
+ channel->dmac_ack = -EINVAL;
/* Request the channel interrupt. */
scnprintf(pdev_irqname, sizeof(pdev_irqname), "ch%u", index);
@@ -1568,6 +1603,7 @@ static int rz_dmac_resume(struct device *dev)
guard(spinlock_irqsave)(&channel->vc.lock);
rz_dmac_set_dma_req_no(dmac, channel->index, channel->mid_rid);
+ rz_dmac_set_dma_ack_no(dmac, channel->index, channel->dmac_ack);
if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_CYCLIC))) {
rz_dmac_ch_writel(&dmac->channels[i], CHCTRL_DEFAULT, CHCTRL, 1);
@@ -1599,6 +1635,8 @@ static const struct dev_pm_ops rz_dmac_pm_ops = {
static const struct rz_dmac_info rz_dmac_v2h_info = {
.icu_register_dma_req = rzv2h_icu_register_dma_req,
+ .icu_register_dma_ack = rzv2h_icu_register_dma_ack,
+ .default_dma_ack_no = RZV2H_ICU_DMAC_ACK_NO_DEFAULT,
.default_dma_req_no = RZV2H_ICU_DMAC_REQ_NO_DEFAULT,
};
--
2.25.1