Enable the ECAM feature if the config space region is large enough to
represent the number of buses given by the bus-range property.
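For reference, ECAM reserves 1 MiB of config space per bus (32 devices x 8
functions x 4 KiB each), so the check amounts to the comparison below. This is
only a sketch of what qcom_pcie_check_ecam_support() does; the 256 MiB window
size is an illustrative value, not taken from any particular board:

	/* bus-range = <0x00 0xff>      ->  256 buses needed
	 * "config" region of 256 MiB   ->  256 MiB >> PCIE_ECAM_BUS_SHIFT (20) = 256 buses available
	 */
	buses_needed    = resource_size(&bus_range);
	buses_available = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
	enable_ecam     = buses_needed <= buses_available;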
The ELBI registers fall right after the DBI space, so derive the ELBI mapping
from the config window returned by the ECAM init instead of doing a separate
ioremap. ELBI starts at offset 0xf20 from the DBI base.
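Sketch of the resulting mapping, restating what qcom_pcie_ecam_init() below
does (the layout is the one assumed by this patch, with dbi_base pointing at
the start of the ECAM window):

	/* dbi_base + 0x0000   DBI / root port config space (ECAM bus 0, dev 0, fn 0)
	 * dbi_base + 0x0f20   ELBI registers (ELBI_OFFSET)
	 */
	pcie->elbi = pci->dbi_base + ELBI_OFFSET;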
Bus 0 contains only the root complex. Any other access on that bus must not
go out on the link and should return all F's. Since the iATU is configured
for bus 1 onwards, block transactions targeting 0:0:1 through 0:31:7 (i.e.,
from dbi_base + 4 KB to dbi_base + 1 MB) from going out on the link by
enabling the ECAM blocker through the PARF registers.
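The blocked window follows from the standard ECAM address encoding
(bus << 20 | device << 15 | function << 12); the offsets below are shown only
to make the 4 KB to 1 MB range explicit:

	/* ECAM offsets within bus 0:
	 *   0:0:0  -> 0x00000            root complex, not blocked
	 *   0:0:1  -> 0x01000  (4 KB)    first blocked function
	 *   0:31:7 -> 0xff000            last blocked function
	 *   bus 1  -> 0x100000 (1 MB)    handled by the iATU
	 */
	base  = dbi_phys_addr + SZ_4K;
	limit = dbi_phys_addr + SZ_1M - 1;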
Signed-off-by: Krishna chaitanya chundru <quic_krichai@quicinc.com>
---
drivers/pci/controller/dwc/pcie-qcom.c | 104 +++++++++++++++++++++++++++++++--
1 file changed, 100 insertions(+), 4 deletions(-)
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index ef44a82be058..266de2aa3a71 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -61,6 +61,17 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360
+#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364
+#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368
+#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c
+#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370
+#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374
+#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378
+#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c
+#define PARF_ECAM_BASE 0x380
+#define PARF_ECAM_BASE_HI 0x384
+
#define PARF_NO_SNOOP_OVERIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
@@ -68,6 +79,8 @@
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
+#define ELBI_OFFSET 0xf20
+
/* ELBI registers */
#define ELBI_SYS_CTRL 0x04
@@ -84,6 +97,7 @@
/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
+#define PCIE_ECAM_BLOCKER_EN BIT(26)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -293,15 +307,68 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
+static int qcom_pci_config_ecam_blocker(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u64 addr, addr_end;
+ u32 val;
+
+ /* Set the ECAM base */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
+
+ /*
+ * On bus 0, we have only the root complex. Any access other than that
+ * should not go out of the link and should return all F's. Since the
+ * IATU is configured for bus 1 onwards, block the transactions for
+ * bus 0:0:1 to 0:31:7 (i.e., from dbi_base + 4 KB to dbi_base + 1 MB) from
+ * going outside the link.
+ */
+ addr = pci->dbi_phys_addr + SZ_4K;
+ writel(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
+ writel(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
+
+ writel(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
+ writel(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
+
+ addr_end = pci->dbi_phys_addr + SZ_1M - 1;
+
+ writel(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
+ writel(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
+
+ writel(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
+ writel(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
+
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val |= PCIE_ECAM_BLOCKER_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
+ return 0;
+}
+
+static int qcom_pcie_ecam_init(struct dw_pcie *pci, struct pci_config_window *cfg)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ pcie->elbi = pci->dbi_base + ELBI_OFFSET;
+ return 0;
+}
+
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ int ret;
if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
qcom_pcie_common_set_16gt_equalization(pci);
qcom_pcie_common_set_16gt_lane_margining(pci);
}
+ if (pci->pp.enable_ecam) {
+ ret = qcom_pci_config_ecam_blocker(&pci->pp);
+ if (ret)
+ return ret;
+ }
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -1297,6 +1364,7 @@ static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.init = qcom_pcie_host_init,
.deinit = qcom_pcie_host_deinit,
.post_init = qcom_pcie_host_post_init,
+ .ecam_init = qcom_pcie_ecam_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1566,6 +1634,31 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
+static bool qcom_pcie_check_ecam_support(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource bus_range, *config_res;
+ u64 bus_config_space_count;
+ int ret;
+
+ /* If bus range is not present, keep the bus range as maximum value */
+ ret = of_pci_parse_bus_range(dev->of_node, &bus_range);
+ if (ret) {
+ bus_range.start = 0x0;
+ bus_range.end = 0xff;
+ }
+
+ config_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (!config_res)
+ return false;
+
+ bus_config_space_count = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
+ if (resource_size(&bus_range) > bus_config_space_count)
+ return false;
+
+ return true;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
@@ -1600,6 +1693,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.enable_ecam = qcom_pcie_check_ecam_support(dev);
pp = &pci->pp;
pcie->pci = pci;
@@ -1618,10 +1712,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pcie->elbi)) {
- ret = PTR_ERR(pcie->elbi);
- goto err_pm_runtime_put;
+ if (!pp->enable_ecam) {
+ pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
+ if (IS_ERR(pcie->elbi)) {
+ ret = PTR_ERR(pcie->elbi);
+ goto err_pm_runtime_put;
+ }
}
/* MHI region is optional */
--
2.34.1