[PATCH 08/15] PCI: dwc: ep: Delegate exported eDMA channels through EPC ops

Koichiro Den posted 15 patches 3 weeks, 4 days ago
[PATCH 08/15] PCI: dwc: ep: Delegate exported eDMA channels through EPC ops
Posted by Koichiro Den 3 weeks, 4 days ago
Implement the new EPC DMA delegation hooks for DesignWare endpoint
controllers with integrated eDMA.

The DWC implementation requests channels through DMAEngine, programs
DW_EDMA_CH_IRQ_REMOTE while the channels are delegated, and keeps the
struct dma_chan references in endpoint-private state so the reservation is
maintained until undelegation.

When the channels are returned, restore the default IRQ mode before
releasing them back to DMAEngine.

Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
 .../pci/controller/dwc/pcie-designware-ep.c   | 188 ++++++++++++++++++
 drivers/pci/controller/dwc/pcie-designware.h  |  11 +
 2 files changed, 199 insertions(+)

diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1e584f6a6565..4c997cf1989c 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -818,6 +818,192 @@ dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 	return ep->ops->get_features(ep);
 }
 
+/*
+ * Filter arguments for dma_request_channel(): restrict the match to the
+ * controller's integrated eDMA device and to channels that support the
+ * requested transfer direction.
+ */
+struct dw_pcie_ep_dma_filter {
+	struct device *dma_dev;	/* must equal chan->device->dev (pci->edma.dev) */
+	u32 direction;		/* BIT(DMA_DEV_TO_MEM) or BIT(DMA_MEM_TO_DEV) */
+};
+
+/*
+ * Map an EPC auxiliary DMA direction onto the DMAEngine direction bitmask
+ * format used by dma_slave_caps.directions.
+ *
+ * Returns 0 and fills @direction on success, -EINVAL for unknown @dir.
+ */
+static int
+dw_pcie_ep_dma_dir_to_direction(enum pci_epc_aux_dma_dir dir, u32 *direction)
+{
+	if (dir == PCI_EPC_AUX_DMA_DIR_READ) {
+		*direction = BIT(DMA_DEV_TO_MEM);
+		return 0;
+	}
+
+	if (dir == PCI_EPC_AUX_DMA_DIR_WRITE) {
+		*direction = BIT(DMA_MEM_TO_DEV);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * dma_request_channel() filter callback: accept only channels that belong
+ * to the integrated eDMA device and advertise the wanted direction.
+ */
+static bool dw_pcie_ep_dma_filter_fn(struct dma_chan *chan, void *arg)
+{
+	struct dw_pcie_ep_dma_filter *filter = arg;
+	struct dma_slave_caps caps;
+
+	if (chan->device->dev != filter->dma_dev)
+		return false;
+
+	if (dma_get_slave_caps(chan, &caps) < 0)
+		return false;
+
+	return (caps.directions & filter->direction) != 0;
+}
+
+/*
+ * Program the channel interrupt mode (DW_EDMA_CH_IRQ_DEFAULT or
+ * DW_EDMA_CH_IRQ_REMOTE) through the dw-edma driver's peripheral_config
+ * side channel of dmaengine_slave_config().
+ */
+static int dw_pcie_ep_dma_set_irq_mode(struct dma_chan *chan,
+				       enum dw_edma_ch_irq_mode mode)
+{
+	struct dw_edma_peripheral_config pcfg = {
+		.irq_mode = mode,
+	};
+	struct dma_slave_config cfg = {
+		.peripheral_config = &pcfg,
+		.peripheral_size = sizeof(pcfg),
+	};
+
+	return dmaengine_slave_config(chan, &cfg);
+}
+
+/*
+ * Look up the delegation record matching the (direction, channel id) pair.
+ * Returns NULL when that channel is not currently delegated.
+ */
+static struct dw_pcie_ep_dma_delegated_chan *
+dw_pcie_ep_find_delegated_dma_chan(struct dw_pcie_ep *ep,
+				   enum pci_epc_aux_dma_dir dir, int chan_id)
+{
+	struct dw_pcie_ep_dma_delegated_chan *dchan;
+	unsigned int i;
+
+	for (i = 0; i < ep->num_delegated_dma_chans; i++) {
+		dchan = &ep->delegated_dma_chans[i];
+		if (dchan->dir == dir && dchan->chan_id == chan_id)
+			return dchan;
+	}
+
+	return NULL;
+}
+
+/*
+ * Drop @dchan from the delegation table using swap-with-last removal:
+ * entry order carries no meaning, so the tail entry is moved into the
+ * freed slot and the now-stale tail slot is zeroed.
+ */
+static void
+dw_pcie_ep_remove_delegated_dma_chan(struct dw_pcie_ep *ep,
+				     struct dw_pcie_ep_dma_delegated_chan *dchan)
+{
+	unsigned int idx = dchan - ep->delegated_dma_chans;
+
+	/* Defensive: ignore pointers outside the live part of the table */
+	if (idx >= ep->num_delegated_dma_chans)
+		return;
+
+	ep->num_delegated_dma_chans--;
+	if (idx != ep->num_delegated_dma_chans)
+		ep->delegated_dma_chans[idx] =
+			ep->delegated_dma_chans[ep->num_delegated_dma_chans];
+
+	memset(&ep->delegated_dma_chans[ep->num_delegated_dma_chans], 0,
+	       sizeof(ep->delegated_dma_chans[0]));
+}
+
+/*
+ * Return delegated channels to local control: restore the default IRQ mode
+ * and release each channel back to DMAEngine.
+ *
+ * Best effort: processing continues across individual failures and the
+ * first error encountered (unknown channel id -> -ENOENT, or a failed IRQ
+ * mode restore) is the one reported; 0 when everything succeeded.
+ */
+static int
+dw_pcie_ep_undelegate_dma_channels(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+				   enum pci_epc_aux_dma_dir dir,
+				   const int *chan_ids, u32 num_chans)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie_ep_dma_delegated_chan *dchan;
+	int ret, rc = 0;
+	u32 i;
+
+	for (i = 0; i < num_chans; i++) {
+		dchan = dw_pcie_ep_find_delegated_dma_chan(ep, dir, chan_ids[i]);
+		if (!dchan) {
+			if (!rc)
+				rc = -ENOENT;
+			continue;
+		}
+
+		/* Undo DW_EDMA_CH_IRQ_REMOTE before handing the channel back */
+		ret = dw_pcie_ep_dma_set_irq_mode(dchan->chan,
+						  DW_EDMA_CH_IRQ_DEFAULT);
+		if (ret && !rc)
+			rc = ret;
+
+		/* Release even if the IRQ mode restore failed */
+		dma_release_channel(dchan->chan);
+		dw_pcie_ep_remove_delegated_dma_chan(ep, dchan);
+	}
+
+	return rc;
+}
+
+/*
+ * Delegate up to @req_chans eDMA channels of direction @dir to the peer.
+ *
+ * Each channel is reserved through DMAEngine (so it cannot be handed out
+ * locally while delegated) and switched to DW_EDMA_CH_IRQ_REMOTE; the
+ * reservation is held in ep->delegated_dma_chans until undelegation.  The
+ * ids handed to the peer are stored in @chan_ids (at most @max_chans).
+ *
+ * Returns the number of channels delegated (> 0), or a negative errno when
+ * no channel could be delegated at all; on a mid-loop error any channels
+ * delegated so far are rolled back.
+ */
+static int
+dw_pcie_ep_delegate_dma_channels(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+				 enum pci_epc_aux_dma_dir dir,
+				 u32 req_chans, int *chan_ids, u32 max_chans)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct dw_pcie_ep_dma_filter filter;
+	dma_cap_mask_t dma_mask;
+	struct dma_chan *chan;
+	u32 delegated = 0;
+	u32 direction;
+	int ret;
+
+	ret = dw_pcie_ep_dma_dir_to_direction(dir, &direction);
+	if (ret)
+		return ret;
+
+	if (!pci->edma.dev)
+		return -ENODEV;
+
+	/* Limit to integrated DMA engine */
+	filter.dma_dev = pci->edma.dev;
+	filter.direction = direction;
+
+	dma_cap_zero(dma_mask);
+	dma_cap_set(DMA_SLAVE, dma_mask);
+
+	ret = -ENODEV;
+	while (delegated < req_chans && delegated < max_chans) {
+		if (ep->num_delegated_dma_chans >=
+		    ARRAY_SIZE(ep->delegated_dma_chans)) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		chan = dma_request_channel(dma_mask, dw_pcie_ep_dma_filter_fn,
+					   &filter);
+		if (!chan)
+			break;
+
+		/*
+		 * Validate the id before touching the IRQ mode so a rejected
+		 * channel is never released back to DMAEngine while still
+		 * programmed for remote interrupts.
+		 */
+		if (chan->chan_id < 0) {
+			dma_release_channel(chan);
+			ret = -ERANGE;
+			goto err_undelegate;
+		}
+
+		ret = dw_pcie_ep_dma_set_irq_mode(chan, DW_EDMA_CH_IRQ_REMOTE);
+		if (ret) {
+			dma_release_channel(chan);
+			goto err_undelegate;
+		}
+
+		ep->delegated_dma_chans[ep->num_delegated_dma_chans++] =
+			(struct dw_pcie_ep_dma_delegated_chan) {
+				.chan = chan,
+				.chan_id = chan->chan_id,
+				.dir = dir,
+			};
+		chan_ids[delegated++] = chan->chan_id;
+	}
+
+	return delegated ? : ret;
+
+err_undelegate:
+	/* Roll back channels delegated so far, restoring default IRQ mode */
+	dw_pcie_ep_undelegate_dma_channels(epc, func_no, vfunc_no, dir,
+					   chan_ids, delegated);
+
+	return ret;
+}
+
 static const struct pci_epc_bar_rsvd_region *
 dw_pcie_ep_find_bar_rsvd_region(struct dw_pcie_ep *ep,
 				enum pci_epc_bar_rsvd_region_type type,
@@ -991,6 +1177,8 @@ static const struct pci_epc_ops epc_ops = {
 	.stop			= dw_pcie_ep_stop,
 	.get_features		= dw_pcie_ep_get_features,
 	.get_aux_resources	= dw_pcie_ep_get_aux_resources,
+	.delegate_dma_channels = dw_pcie_ep_delegate_dma_channels,
+	.undelegate_dma_channels = dw_pcie_ep_undelegate_dma_channels,
 };
 
 /**
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 52f26663e8b1..d7d60278fbba 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -479,6 +479,12 @@ struct dw_pcie_ep_func {
 	unsigned int		num_ib_atu_indexes[PCI_STD_NUM_BARS];
 };
 
+/* Bookkeeping for one eDMA channel currently delegated to the remote peer */
+struct dw_pcie_ep_dma_delegated_chan {
+	struct dma_chan		*chan;		/* held reference keeping the reservation */
+	int			chan_id;	/* id advertised to the peer */
+	u8			dir;		/* enum pci_epc_aux_dma_dir */
+};
+
 struct dw_pcie_ep {
 	struct pci_epc		*epc;
 	struct list_head	func_list;
@@ -496,6 +502,11 @@ struct dw_pcie_ep {
 	bool			msi_iatu_mapped;
 	u64			msi_msg_addr;
 	size_t			msi_map_size;
+
+	/* DMA channels reserved for peer export */
+	u8			num_delegated_dma_chans;
+	struct dw_pcie_ep_dma_delegated_chan
+				delegated_dma_chans[EDMA_MAX_WR_CH + EDMA_MAX_RD_CH];
 };
 
 struct dw_pcie_ops {
-- 
2.51.0