From: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
Subsequent patches will add suspend/resume and cyclic DMA support to the
rz-dmac driver. This support needs to work on SoCs where power to most
components (including DMA) is turned off during system suspend. For this,
some channels (for example cyclic ones) may need to be paused and resumed
manually by the DMA driver during system suspend/resume.
Refactor the pause/resume support so the same code can be reused in the
system suspend/resume path.
Signed-off-by: Claudiu Beznea <claudiu.beznea.uj@bp.renesas.com>
---
Changes in v3:
- none, this patch is new
drivers/dma/sh/rz-dmac.c | 68 +++++++++++++++++++++++++++++++++-------
1 file changed, 57 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index d47c7601907f..bacde5e28616 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -18,6 +18,7 @@
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/irqchip/irq-renesas-rzt2h.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
@@ -63,6 +64,14 @@ struct rz_dmac_desc {
#define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd)
+/**
+ * enum rz_dmac_chan_status - RZ DMAC channel status
+ * @RZ_DMAC_CHAN_STATUS_PAUSED: Channel is paused through DMA engine callbacks
+ */
+enum rz_dmac_chan_status {
+ RZ_DMAC_CHAN_STATUS_PAUSED,
+};
+
struct rz_dmac_chan {
struct virt_dma_chan vc;
void __iomem *ch_base;
@@ -74,6 +83,8 @@ struct rz_dmac_chan {
dma_addr_t src_per_address;
dma_addr_t dst_per_address;
+ unsigned long status;
+
u32 chcfg;
u32 chctrl;
int mid_rid;
@@ -792,35 +803,70 @@ static enum dma_status rz_dmac_tx_status(struct dma_chan *chan,
return status;
}
-static int rz_dmac_device_pause(struct dma_chan *chan)
+static int rz_dmac_device_pause_set(struct rz_dmac_chan *channel,
+ unsigned long set_bitmask)
{
- struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+ int ret = 0;
u32 val;
- guard(spinlock_irqsave)(&channel->vc.lock);
+ lockdep_assert_held(&channel->vc.lock);
if (!rz_dmac_chan_is_enabled(channel))
return 0;
+ if (rz_dmac_chan_is_paused(channel))
+ goto set_bit;
+
rz_dmac_ch_writel(channel, CHCTRL_SETSUS, CHCTRL, 1);
- return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
- (val & CHSTAT_SUS), 1, 1024,
- false, channel, CHSTAT, 1);
+ ret = read_poll_timeout_atomic(rz_dmac_ch_readl, val,
+ (val & CHSTAT_SUS), 1, 1024, false,
+ channel, CHSTAT, 1);
+
+set_bit:
+ channel->status |= set_bitmask;
+
+ return ret;
}
-static int rz_dmac_device_resume(struct dma_chan *chan)
+static int rz_dmac_device_pause(struct dma_chan *chan)
{
struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
- u32 val;
guard(spinlock_irqsave)(&channel->vc.lock);
+ return rz_dmac_device_pause_set(channel, BIT(RZ_DMAC_CHAN_STATUS_PAUSED));
+}
+
+static int rz_dmac_device_resume_set(struct rz_dmac_chan *channel,
+ unsigned long clear_bitmask)
+{
+ int ret = 0;
+ u32 val;
+
+ lockdep_assert_held(&channel->vc.lock);
+
/* Do not check CHSTAT_SUS but rely on HW capabilities. */
rz_dmac_ch_writel(channel, CHCTRL_CLRSUS, CHCTRL, 1);
- return read_poll_timeout_atomic(rz_dmac_ch_readl, val,
- !(val & CHSTAT_SUS), 1, 1024,
- false, channel, CHSTAT, 1);
+ ret = read_poll_timeout_atomic(rz_dmac_ch_readl, val,
+ !(val & CHSTAT_SUS), 1, 1024, false,
+ channel, CHSTAT, 1);
+
+ channel->status &= ~clear_bitmask;
+
+ return ret;
+}
+
+static int rz_dmac_device_resume(struct dma_chan *chan)
+{
+ struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
+
+ guard(spinlock_irqsave)(&channel->vc.lock);
+
+ if (!(channel->status & BIT(RZ_DMAC_CHAN_STATUS_PAUSED)))
+ return 0;
+
+ return rz_dmac_device_resume_set(channel, BIT(RZ_DMAC_CHAN_STATUS_PAUSED));
}
/*
--
2.43.0