[PATCH RFC 07/12] dmaengine: virt-dma: split vchan_tx_prep() into init and internal helpers

Split vchan_tx_prep() into vchan_init_dma_async_tx() and __vchan_tx_prep()
to prepare for supporting the common linked-list DMA library.

struct dma_async_tx_descriptor already contains the dma_chan pointer, so
drivers do not need to duplicate it in their own descriptor structures
that embed struct virt_dma_desc.
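
For illustration only (the foo_* names below are hypothetical), a driver
descriptor embedding struct virt_dma_desc can recover the channel through
the embedded transaction descriptor instead of carrying its own pointer:

  struct foo_dma_desc {
          struct virt_dma_desc vd;   /* vd.tx.chan is the channel */
          unsigned int nents;
          /* no duplicated "struct dma_chan *chan" member needed */
  };

  static inline struct virt_dma_chan *foo_desc_to_vchan(struct foo_dma_desc *d)
  {
          return to_virt_chan(d->vd.tx.chan);
  }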

Previously, vd->tx.chan was NULL while a driver built its descriptor,
because dma_async_tx_descriptor_init() was only called from
vchan_tx_prep() at the end of the prepare path. Initializing the
dma_async_tx_descriptor earlier, via vchan_init_dma_async_tx(), lets
drivers access dma_chan directly during the prepare phase.
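
A minimal sketch of a driver prepare callback using the split helpers;
apart from vchan_init_dma_async_tx(), __vchan_tx_prep() and
to_virt_chan(), every name here is hypothetical:

  static struct dma_async_tx_descriptor *
  foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                    unsigned int sg_len, enum dma_transfer_direction dir,
                    unsigned long flags, void *context)
  {
          struct virt_dma_chan *vc = to_virt_chan(chan);
          struct foo_dma_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

          if (!d)
                  return NULL;

          /* Initialize early so d->vd.tx.chan is valid while the
           * hardware descriptor chain is being built. */
          vchan_init_dma_async_tx(vc, &d->vd, flags);

          /* ... build hardware descriptors; d->vd.tx.chan is usable ... */

          /* Add to desc_allocated and hand the descriptor back. */
          return __vchan_tx_prep(vc, &d->vd);
  }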

No functional change.

Signed-off-by: Frank Li <Frank.Li@nxp.com>
---
 drivers/dma/virt-dma.h | 38 ++++++++++++++++++++++++++------------
 1 file changed, 26 insertions(+), 12 deletions(-)

diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index a15f9e318ca5ec7fd3c4e6fc6864ad3d1dc3eaa5..ad5ce489cf8e52aa02a0129bc5657fadd6070da2 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -80,17 +80,22 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
 extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
 
-/**
- * vchan_tx_prep - prepare a descriptor
- * @vc: virtual channel allocating this descriptor
- * @vd: virtual descriptor to prepare
- * @tx_flags: flags argument passed in to prepare function
- */
-static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
-	struct virt_dma_desc *vd, unsigned long tx_flags)
+static inline struct dma_async_tx_descriptor *
+__vchan_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd)
 {
 	unsigned long flags;
 
+	spin_lock_irqsave(&vc->lock, flags);
+	list_add_tail(&vd->node, &vc->desc_allocated);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	return &vd->tx;
+}
+
+static inline void
+vchan_init_dma_async_tx(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
+			unsigned long tx_flags)
+{
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
@@ -98,12 +103,21 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 
 	vd->tx_result.result = DMA_TRANS_NOERROR;
 	vd->tx_result.residue = 0;
+}
 
-	spin_lock_irqsave(&vc->lock, flags);
-	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * @vc: virtual channel allocating this descriptor
+ * @vd: virtual descriptor to prepare
+ * @tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *
+vchan_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
+	      unsigned long tx_flags)
+{
+	vchan_init_dma_async_tx(vc, vd, tx_flags);
 
-	return &vd->tx;
+	return __vchan_tx_prep(vc, vd);
 }
 
 /**

-- 
2.34.1