From: Manikandan K Pillai <mpillai@cadence.com>
Add support for the Cadence PCIe HPA controller by adding the
required callback functions. Update the common RP and EP
configuration code to dispatch through these callbacks, and select
the appropriate callback set when the platform driver probes the
controller.
Signed-off-by: Manikandan K Pillai <mpillai@cadence.com>
Co-developed-by: Hans Zhang <hans.zhang@cixtech.com>
Signed-off-by: Hans Zhang <hans.zhang@cixtech.com>
---
.../pci/controller/cadence/pcie-cadence-ep.c | 29 +-
.../controller/cadence/pcie-cadence-host.c | 271 ++++++++++++++++--
.../controller/cadence/pcie-cadence-plat.c | 23 ++
drivers/pci/controller/cadence/pcie-cadence.c | 196 ++++++++++++-
drivers/pci/controller/cadence/pcie-cadence.h | 12 +
5 files changed, 488 insertions(+), 43 deletions(-)
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index f3f956fa116b..f4961c760434 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -192,7 +192,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
}
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
- cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
+ pcie->ops->set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
ep->ob_addr[r] = addr;
@@ -214,7 +214,7 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
if (r == ep->max_regions - 1)
return;
- cdns_pcie_reset_outbound_region(pcie, r);
+ pcie->ops->reset_outbound_region(pcie, r);
ep->ob_addr[r] = 0;
clear_bit(r, &ep->ob_region_map);
@@ -329,8 +329,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
ep->irq_pci_fn != fn)) {
/* First region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
- ep->irq_phys_addr);
+ pcie->ops->set_outbound_region_for_normal_msg(pcie, 0, fn, 0, ep->irq_phys_addr);
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
ep->irq_pci_fn = fn;
}
@@ -411,11 +410,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn)) {
/* First region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
- false,
- ep->irq_phys_addr,
- pci_addr & ~pci_addr_mask,
- pci_addr_mask + 1);
+ pcie->ops->set_outbound_region(pcie, 0, fn, 0,
+ false,
+ ep->irq_phys_addr,
+ pci_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
ep->irq_pci_fn = fn;
}
@@ -514,11 +513,11 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn) {
/* First region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
- false,
- ep->irq_phys_addr,
- msg_addr & ~pci_addr_mask,
- pci_addr_mask + 1);
+ pcie->ops->set_outbound_region(pcie, 0, fn, 0,
+ false,
+ ep->irq_phys_addr,
+ msg_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
ep->irq_pci_fn = fn;
}
@@ -869,7 +868,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
set_bit(0, &ep->ob_region_map);
if (ep->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
+ pcie->ops->detect_quiet_min_delay_set(&ep->pcie);
spin_lock_init(&ep->lock);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index ce035eef0a5c..c7066ea3b9e8 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -60,10 +60,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
/* Configuration Type 0 or Type 1 access. */
desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
- /*
- * The bus number was already set once for all in desc1 by
- * cdns_pcie_host_init_address_translation().
- */
+
if (busn == bridge->busnr + 1)
desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
else
@@ -73,12 +70,81 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+ struct cdns_pcie *pcie = &rc->pcie;
+ unsigned int busn = bus->number;
+ u32 addr0, desc0, desc1, ctrl0;
+ u32 regval;
+
+ if (pci_is_root_bus(bus)) {
+ /*
+ * Only the root port (devfn == 0) is connected to this bus.
+ * All other PCI devices are behind some bridge hence on another
+ * bus.
+ */
+ if (devfn)
+ return NULL;
+
+ return pcie->reg_base + (where & 0xfff);
+ }
+
+ /*
+ * Clear AXI link-down status
+ */
+ regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
+ (regval & GENMASK(0, 0)));
+
+ desc1 = 0;
+ ctrl0 = 0;
+
+ /*
+ * Update Output registers for AXI region 0.
+ */
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);
+
+ desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
+ desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+
+ if (busn == bridge->busnr + 1)
+ desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
+ else
+ desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);
+
+ return rc->cfg_base + (where & 0xfff);
+}
+
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
};
+static struct pci_ops cdns_pcie_hpa_host_ops = {
+ .map_bus = cdns_pci_hpa_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
{
u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
@@ -154,8 +220,14 @@ static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
u32 val;
- val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
- cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+ if (!pcie->is_hpa) {
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+ } else {
+ val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL,
+ val | CDNS_PCIE_HPA_LM_TPM_CTRL_PTMRSEN);
+ }
}
static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
@@ -340,8 +412,8 @@ static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
*/
bar = cdns_pcie_host_find_min_bar(rc, size);
if (bar != RP_BAR_UNDEFINED) {
- ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
- size, flags);
+ ret = pcie->ops->host_bar_ib_config(rc, bar, cpu_addr,
+ size, flags);
if (ret)
dev_err(dev, "IB BAR: %d config failed\n", bar);
return ret;
@@ -366,8 +438,7 @@ static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
}
winsize = bar_max_size[bar];
- ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
- flags);
+ ret = pcie->ops->host_bar_ib_config(rc, bar, cpu_addr, winsize, flags);
if (ret) {
dev_err(dev, "IB BAR: %d config failed\n", bar);
return ret;
@@ -408,8 +479,8 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
if (list_empty(&bridge->dma_ranges)) {
of_property_read_u32(np, "cdns,no-bar-match-nbits",
&no_bar_nbits);
- err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
- (u64)1 << no_bar_nbits, 0);
+ err = pcie->ops->host_bar_ib_config(rc, RP_NO_BAR, 0x0,
+ (u64)1 << no_bar_nbits, 0);
if (err)
dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
return err;
@@ -467,17 +538,159 @@ int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
u64 pci_addr = res->start - entry->offset;
if (resource_type(res) == IORESOURCE_IO)
- cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
- true,
- pci_pio_to_address(res->start),
- pci_addr,
- resource_size(res));
+ pcie->ops->set_outbound_region(pcie, busnr, 0, r,
+ true,
+ pci_pio_to_address(res->start),
+ pci_addr,
+ resource_size(res));
+ else
+ pcie->ops->set_outbound_region(pcie, busnr, 0, r,
+ false,
+ res->start,
+ pci_addr,
+ resource_size(res));
+
+ r++;
+ }
+
+ return cdns_pcie_host_map_dma_ranges(rc);
+}
+
+int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ /*
+ * Set the root complex BAR configuration register:
+ * - disable both BAR0 and BAR1.
+ * - enable Prefetchable Memory Base and Limit registers in type 1
+ * config space (64 bits).
+ * - enable IO Base and Limit registers in type 1 config
+ * space (32 bits).
+ */
+
+ ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED;
+ value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS;
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
+
+ if (rc->vendor_id != 0xffff)
+ cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
+
+ if (rc->device_id != 0xffff)
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
+
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+ return 0;
+}
+
+int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr, u64 size,
+ unsigned long flags)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 addr0, addr1, aperture, value;
+
+ if (!rc->avail_ib_bar[bar])
+ return -EBUSY;
+
+ rc->avail_ib_bar[bar] = false;
+
+ aperture = ilog2(size);
+ addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
+ CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
+ CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1);
+
+ if (bar == RP_NO_BAR)
+ return 0;
+
+ value = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG);
+ value &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ if (size + cpu_addr >= SZ_4G) {
+ if (!(flags & IORESOURCE_PREFETCH))
+ value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
+ value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
+ } else {
+ if (!(flags & IORESOURCE_PREFETCH))
+ value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
+ value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
+ }
+
+ value |= HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
+
+ return 0;
+}
+
+int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource *cfg_res = rc->cfg_res;
+ struct resource_entry *entry;
+ u64 cpu_addr = cfg_res->start;
+ u32 addr0, addr1, desc1;
+ int r, busnr = 0;
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
+
+ /*
+ * Reserve region 0 for PCI configure space accesses:
+ * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
+ * cdns_pci_map_bus(), other region registers are set here once for all.
+ */
+ addr1 = 0;
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1);
+
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ struct resource *res = entry->res;
+ u64 pci_addr = res->start - entry->offset;
+
+ if (resource_type(res) == IORESOURCE_IO)
+ pcie->ops->set_outbound_region(pcie, busnr, 0, r,
+ true,
+ pci_pio_to_address(res->start),
+ pci_addr,
+ resource_size(res));
else
- cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
- false,
- res->start,
- pci_addr,
- resource_size(res));
+ pcie->ops->set_outbound_region(pcie, busnr, 0, r,
+ false,
+ res->start,
+ pci_addr,
+ resource_size(res));
r++;
}
@@ -489,11 +702,11 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
- err = cdns_pcie_host_init_root_port(rc);
+ err = rc->pcie.ops->host_init_root_port(rc);
if (err)
return err;
- return cdns_pcie_host_init_address_translation(rc);
+ return rc->pcie.ops->host_init_address_translation(rc);
}
int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
@@ -503,7 +716,7 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
int ret;
if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+ pcie->ops->detect_quiet_min_delay_set(&rc->pcie);
cdns_pcie_host_enable_ptm_response(pcie);
@@ -566,8 +779,12 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
if (ret)
return ret;
- if (!bridge->ops)
- bridge->ops = &cdns_pcie_host_ops;
+ if (!bridge->ops) {
+ if (pcie->is_hpa)
+ bridge->ops = &cdns_pcie_hpa_host_ops;
+ else
+ bridge->ops = &cdns_pcie_host_ops;
+ }
ret = pci_host_probe(bridge);
if (ret < 0)
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index b24176d4df1f..8d5fbaef0a3c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -43,7 +43,30 @@ static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr)
}
static const struct cdns_pcie_ops cdns_plat_ops = {
+ .link_up = cdns_pcie_linkup,
.cpu_addr_fixup = cdns_plat_cpu_addr_fixup,
+ .host_init_root_port = cdns_pcie_host_init_root_port,
+ .host_bar_ib_config = cdns_pcie_host_bar_ib_config,
+ .host_init_address_translation = cdns_pcie_host_init_address_translation,
+ .detect_quiet_min_delay_set = cdns_pcie_detect_quiet_min_delay_set,
+ .set_outbound_region = cdns_pcie_set_outbound_region,
+ .set_outbound_region_for_normal_msg =
+ cdns_pcie_set_outbound_region_for_normal_msg,
+ .reset_outbound_region = cdns_pcie_reset_outbound_region,
+};
+
+static const struct cdns_pcie_ops cdns_hpa_plat_ops = {
+ .start_link = cdns_pcie_hpa_startlink,
+ .stop_link = cdns_pcie_hpa_stop_link,
+ .link_up = cdns_pcie_hpa_linkup,
+ .host_init_root_port = cdns_pcie_hpa_host_init_root_port,
+ .host_bar_ib_config = cdns_pcie_hpa_host_bar_ib_config,
+ .host_init_address_translation = cdns_pcie_hpa_host_init_address_translation,
+ .detect_quiet_min_delay_set = cdns_pcie_hpa_detect_quiet_min_delay_set,
+ .set_outbound_region = cdns_pcie_hpa_set_outbound_region,
+ .set_outbound_region_for_normal_msg =
+ cdns_pcie_hpa_set_outbound_region_for_normal_msg,
+ .reset_outbound_region = cdns_pcie_hpa_reset_outbound_region,
};
static int cdns_plat_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 204e045aed8c..c0d3ab3c363f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -8,6 +8,45 @@
#include "pcie-cadence.h"
+bool cdns_pcie_linkup(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE);
+ if (pl_reg_val & GENMASK(0, 0))
+ return true;
+ return false;
+}
+
+bool cdns_pcie_hpa_linkup(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_DBG_STS_REG0);
+ if (pl_reg_val & GENMASK(0, 0))
+ return true;
+ return false;
+}
+
+int cdns_pcie_hpa_start_link(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_LAYER_CFG0);
+ pl_reg_val |= CDNS_PCIE_HPA_LINK_TRNG_EN_MASK;
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_LAYER_CFG0, pl_reg_val);
+ return 0;
+}
+
+void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_LAYER_CFG0);
+ pl_reg_val &= ~CDNS_PCIE_HPA_LINK_TRNG_EN_MASK;
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_LAYER_CFG0, pl_reg_val);
+}
+
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
u32 delay = 0x3;
@@ -55,7 +94,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
desc1 = 0;
/*
- * Whatever Bit [23] is set or not inside DESC0 register of the outbound
+ * Whether Bit [23] is set or not inside DESC0 register of the outbound
* PCIe descriptor, the PCI function number must be set into
* Bits [26:24] of DESC0 anyway.
*
@@ -147,6 +186,161 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /* Set the LTSSM Detect Quiet state min. delay to 2ms. */
+ ltssm_control_cap = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG,
+ CDNS_PCIE_HPA_PHY_LAYER_CFG0);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG,
+ CDNS_PCIE_HPA_PHY_LAYER_CFG0, ltssm_control_cap);
+}
+
+void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size)
+{
+ /*
+ * roundup_pow_of_two() returns an unsigned long, which is not suited
+ * for 64bit values.
+ */
+ u64 sz = 1ULL << fls64(size - 1);
+ int nbits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1, ctrl0;
+
+ if (nbits < 8)
+ nbits = 8;
+
+ /* Set the PCI address */
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
+ (lower_32_bits(pci_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(pci_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), addr1);
+
+ /* Set the PCIe header descriptor */
+ if (is_io)
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO;
+ else
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM;
+ desc1 = 0;
+
+ /*
+ * Whether Bit [26] is set or not inside DESC0 register of the outbound
+ * PCIe descriptor, the PCI function number must be set into
+ * Bits [31:24] of DESC1 anyway.
+ *
+ * In Root Complex mode, the function number is always 0 but in Endpoint
+ * mode, the PCIe controller may support more than one function. This
+ * function number needs to be set properly into the outbound PCIe
+ * descriptor.
+ *
+ * Besides, setting Bit [26] is mandatory when in Root Complex mode:
+ * then the driver must provide the bus, resp. device, number in
+ * Bits [31:24] of DESC1, resp. Bits[23:16] of DESC0. Like the function
+ * number, the device number is always 0 in Root Complex mode.
+ *
+ * However when in Endpoint mode, we can clear Bit [26] of DESC0, hence
+ * the PCIe controller will use the captured values for the bus and
+ * device numbers.
+ */
+ if (pcie->is_rc) {
+ /* The device and function numbers are always 0. */
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+ } else {
+ /*
+ * Use captured values for bus and device numbers but still
+ * need to set the function number.
+ */
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
+ }
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
+}
+
+void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr)
+{
+ u32 addr0, addr1, desc0, desc1, ctrl0;
+
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
+ desc1 = 0;
+
+ /* See cdns_pcie_set_outbound_region() comments above. */
+ if (pcie->is_rc) {
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+ } else {
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
+ }
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
+}
+
+void cdns_pcie_hpa_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
+{
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), 0);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), 0);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), 0);
+}
+
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
int i = pcie->phy_count;
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index a39077d64d1d..c317fc10f5d1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -816,6 +816,14 @@ int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
enum cdns_pcie_rp_bar bar,
u64 cpu_addr, u64 size,
unsigned long flags);
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc);
+int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr, u64 size,
+ unsigned long flags);
+int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc);
#else
static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
{
@@ -865,6 +873,10 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
}
#endif
+bool cdns_pcie_linkup(struct cdns_pcie *pcie);
+bool cdns_pcie_hpa_linkup(struct cdns_pcie *pcie);
+int cdns_pcie_hpa_start_link(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie);
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
--
2.47.1
Hi,
kernel test robot noticed the following build warnings:
[auto build test WARNING on a24588245776dafc227243a01bfbeb8a59bafba9]
url: https://github.com/intel-lab-lkp/linux/commits/hans-zhang-cixtech-com/dt-bindings-pci-cadence-Extend-compatible-for-new-RP-configuration/20250414-094836
base: a24588245776dafc227243a01bfbeb8a59bafba9
patch link: https://lore.kernel.org/r/20250411103656.2740517-6-hans.zhang%40cixtech.com
patch subject: [PATCH v3 5/6] PCI: cadence: Add callback functions for RP and EP controller
config: powerpc64-randconfig-001-20250414 (https://download.01.org/0day-ci/archive/20250414/202504141719.svx3rf5x-lkp@intel.com/config)
compiler: clang version 17.0.6 (https://github.com/llvm/llvm-project 6009708b4367171ccdbf4b5905cb6a803753fe18)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250414/202504141719.svx3rf5x-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202504141719.svx3rf5x-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/pci/controller/cadence/pcie-cadence.c:303:12: warning: variable 'ctrl0' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
303 | desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/pci/controller/cadence/pcie-cadence.h:342:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN'
342 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK, devfn)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/bitfield.h:115:3: note: expanded from macro 'FIELD_PREP'
115 | __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/bitfield.h:68:3: note: expanded from macro '__BF_FIELD_CHECK'
68 | BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
69 | ~((_mask) >> __bf_shf(_mask)) & \
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
70 | (0 + (_val)) : 0, \
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
71 | _pfx "value too large for the field"); \
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
include/linux/compiler_types.h:557:2: note: expanded from macro 'compiletime_assert'
557 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/compiler_types.h:545:2: note: expanded from macro '_compiletime_assert'
545 | __compiletime_assert(condition, msg, prefix, suffix)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/compiler_types.h:537:7: note: expanded from macro '__compiletime_assert'
537 | if (!(condition)) \
| ^~~~~~~~~~~~
drivers/pci/controller/cadence/pcie-cadence.c:323:46: note: uninitialized use occurs here
323 | CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
| ^~~~~
drivers/pci/controller/cadence/pcie-cadence.c:303:12: note: remove the 'if' if its condition is always true
303 | desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
| ^
drivers/pci/controller/cadence/pcie-cadence.h:342:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN'
342 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK, devfn)
| ^
include/linux/bitfield.h:115:3: note: expanded from macro 'FIELD_PREP'
115 | __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
| ^
include/linux/bitfield.h:68:3: note: expanded from macro '__BF_FIELD_CHECK'
68 | BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
include/linux/compiler_types.h:557:2: note: expanded from macro 'compiletime_assert'
557 | _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
| ^
include/linux/compiler_types.h:545:2: note: expanded from macro '_compiletime_assert'
545 | __compiletime_assert(condition, msg, prefix, suffix)
| ^
include/linux/compiler_types.h:537:3: note: expanded from macro '__compiletime_assert'
537 | if (!(condition)) \
| ^
drivers/pci/controller/cadence/pcie-cadence.c:291:39: note: initialize the variable 'ctrl0' to silence this warning
291 | u32 addr0, addr1, desc0, desc1, ctrl0;
| ^
| = 0
1 warning generated.
--
>> drivers/pci/controller/cadence/pcie-cadence-host.c:122:3: warning: variable 'desc0' is uninitialized when used here [-Wuninitialized]
122 | desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
| ^~~~~
drivers/pci/controller/cadence/pcie-cadence-host.c:80:18: note: initialize the variable 'desc0' to silence this warning
80 | u32 addr0, desc0, desc1, ctrl0;
| ^
| = 0
1 warning generated.
vim +303 drivers/pci/controller/cadence/pcie-cadence.c
286
287 void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
288 u8 busnr, u8 fn,
289 u32 r, u64 cpu_addr)
290 {
291 u32 addr0, addr1, desc0, desc1, ctrl0;
292
293 desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
294 desc1 = 0;
295
296 /* See cdns_pcie_set_outbound_region() comments above. */
297 if (pcie->is_rc) {
298 desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
299 CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
300 ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
301 CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
302 } else {
> 303 desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
304 }
305
306 addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
307 (lower_32_bits(cpu_addr) & GENMASK(31, 8));
308 addr1 = upper_32_bits(cpu_addr);
309
310 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
311 CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), 0);
312 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
313 CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), 0);
314 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
315 CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
316 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
317 CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
318 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
319 CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
320 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
321 CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
322 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
323 CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
324 }
325
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
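Both clang reports above come from variables that are only assigned on some
paths before being written to the outbound-region registers. A minimal,
behavior-preserving way to silence them -- assuming zero really is the intended
base value for the descriptor/control words, which is a sketch and not the
submitted fix -- is to give the variables an explicit initializer:

    /* drivers/pci/controller/cadence/pcie-cadence-host.c, cdns_pci_hpa_map_bus() */
    u32 addr0, desc0 = 0, desc1, ctrl0;   /* desc0 is only OR-ed below */

    /* drivers/pci/controller/cadence/pcie-cadence.c,
     * cdns_pcie_hpa_set_outbound_region_for_normal_msg()
     */
    u32 addr0, addr1, desc0, desc1, ctrl0 = 0;   /* ctrl0 stays 0 in EP mode */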
Hi,
kernel test robot noticed the following build errors:
[auto build test ERROR on a24588245776dafc227243a01bfbeb8a59bafba9]
url: https://github.com/intel-lab-lkp/linux/commits/hans-zhang-cixtech-com/dt-bindings-pci-cadence-Extend-compatible-for-new-RP-configuration/20250414-094836
base: a24588245776dafc227243a01bfbeb8a59bafba9
patch link: https://lore.kernel.org/r/20250411103656.2740517-6-hans.zhang%40cixtech.com
patch subject: [PATCH v3 5/6] PCI: cadence: Add callback functions for RP and EP controller
config: x86_64-buildonly-randconfig-002-20250414 (https://download.01.org/0day-ci/archive/20250414/202504141523.v9N9MrDJ-lkp@intel.com/config)
compiler: clang version 20.1.2 (https://github.com/llvm/llvm-project 58df0ef89dd64126512e4ee27b4ac3fd8ddf6247)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250414/202504141523.v9N9MrDJ-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202504141523.v9N9MrDJ-lkp@intel.com/
All errors (new ones prefixed by >>):
>> drivers/pci/controller/cadence/pcie-cadence-host.c:108:10: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
108 | addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
| ^
drivers/pci/controller/cadence/pcie-cadence.h:309:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS'
309 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK, \
| ^
drivers/pci/controller/cadence/pcie-cadence-host.c:574:10: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
574 | value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
| ^
drivers/pci/controller/cadence/pcie-cadence.h:263:2: note: expanded from macro 'CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL'
263 | FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK, c)
| ^
drivers/pci/controller/cadence/pcie-cadence-host.c:610:10: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
610 | addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
| ^
drivers/pci/controller/cadence/pcie-cadence.h:361:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS'
361 | FIELD_PREP(CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK, ((nbits) - 1))
| ^
drivers/pci/controller/cadence/pcie-cadence-host.c:663:10: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
663 | desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
| ^
drivers/pci/controller/cadence/pcie-cadence.h:339:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS'
339 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK, bus)
| ^
4 errors generated.
--
>> drivers/pci/controller/cadence/pcie-cadence.c:199:8: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
199 | CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay));
| ^
drivers/pci/controller/cadence/pcie-cadence.h:375:2: note: expanded from macro 'CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY'
375 | FIELD_PREP(CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK, delay)
| ^
drivers/pci/controller/cadence/pcie-cadence.c:221:10: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
221 | addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
| ^
drivers/pci/controller/cadence/pcie-cadence.h:309:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS'
309 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK, \
| ^
drivers/pci/controller/cadence/pcie-cadence.c:293:10: error: call to undeclared function 'FIELD_PREP'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
293 | desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
| ^
drivers/pci/controller/cadence/pcie-cadence.h:333:2: note: expanded from macro 'CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG'
333 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x10)
| ^
3 errors generated.
vim +/FIELD_PREP +108 drivers/pci/controller/cadence/pcie-cadence-host.c
72
73 void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
74 int where)
75 {
76 struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
77 struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
78 struct cdns_pcie *pcie = &rc->pcie;
79 unsigned int busn = bus->number;
80 u32 addr0, desc0, desc1, ctrl0;
81 u32 regval;
82
83 if (pci_is_root_bus(bus)) {
84 /*
85 * Only the root port (devfn == 0) is connected to this bus.
86 * All other PCI devices are behind some bridge hence on another
87 * bus.
88 */
89 if (devfn)
90 return NULL;
91
92 return pcie->reg_base + (where & 0xfff);
93 }
94
95 /*
96 * Clear AXI link-down status
97 */
98 regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
99 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
100 (regval & GENMASK(0, 0)));
101
102 desc1 = 0;
103 ctrl0 = 0;
104
105 /*
106 * Update Output registers for AXI region 0.
107 */
> 108 addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
109 CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
110 CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn);
111 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
112 CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);
113
114 desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
115 CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
116 desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
117 desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
118 ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
119 CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
120
121 if (busn == bridge->busnr + 1)
122 desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
123 else
124 desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
125
126 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
127 CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
128 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
129 CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
130 cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
131 CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);
132
133 return rc->cfg_base + (where & 0xfff);
134 }
135
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
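The implicit-declaration errors above all point at the new CDNS_PCIE_HPA_*
register macros, which expand to FIELD_PREP(). FIELD_PREP() lives in
<linux/bitfield.h>, which pcie-cadence.h evidently does not pull in on these
configs. A likely one-line fix (an assumption based on the error output, not
verified against the full series) is to include the helper directly in the
header that defines the macros:

    /* drivers/pci/controller/cadence/pcie-cadence.h */
    #include <linux/bitfield.h>   /* FIELD_PREP()/FIELD_GET() used by the HPA macros */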
Hi,
kernel test robot noticed the following build errors:
[auto build test ERROR on a24588245776dafc227243a01bfbeb8a59bafba9]
url: https://github.com/intel-lab-lkp/linux/commits/hans-zhang-cixtech-com/dt-bindings-pci-cadence-Extend-compatible-for-new-RP-configuration/20250414-094836
base: a24588245776dafc227243a01bfbeb8a59bafba9
patch link: https://lore.kernel.org/r/20250411103656.2740517-6-hans.zhang%40cixtech.com
patch subject: [PATCH v3 5/6] PCI: cadence: Add callback functions for RP and EP controller
config: arc-randconfig-001-20250414 (https://download.01.org/0day-ci/archive/20250414/202504141101.J2GJGhRZ-lkp@intel.com/config)
compiler: arc-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250414/202504141101.J2GJGhRZ-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202504141101.J2GJGhRZ-lkp@intel.com/
All error/warnings (new ones prefixed by >>):
In file included from drivers/pci/controller/cadence/pcie-cadence-host.c:13:
drivers/pci/controller/cadence/pcie-cadence-host.c: In function 'cdns_pci_hpa_map_bus':
>> drivers/pci/controller/cadence/pcie-cadence.h:309:9: error: implicit declaration of function 'FIELD_PREP' [-Wimplicit-function-declaration]
309 | FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK, \
| ^~~~~~~~~~
drivers/pci/controller/cadence/pcie-cadence-host.c:108:17: note: in expansion of macro 'CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS'
108 | addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--
>> drivers/pci/controller/cadence/pcie-cadence-plat.c:59:23: error: 'cdns_pcie_hpa_startlink' undeclared here (not in a function); did you mean 'cdns_pcie_hpa_start_link'?
59 | .start_link = cdns_pcie_hpa_startlink,
| ^~~~~~~~~~~~~~~~~~~~~~~
| cdns_pcie_hpa_start_link
>> drivers/pci/controller/cadence/pcie-cadence-plat.c:58:35: warning: 'cdns_hpa_plat_ops' defined but not used [-Wunused-const-variable=]
58 | static const struct cdns_pcie_ops cdns_hpa_plat_ops = {
| ^~~~~~~~~~~~~~~~~
vim +/FIELD_PREP +309 drivers/pci/controller/cadence/pcie-cadence.h
fc9e872310321c Manikandan K Pillai 2025-04-11 304
fc9e872310321c Manikandan K Pillai 2025-04-11 305 /* Region r Outbound AXI to PCIe Address Translation Register 0 */
fc9e872310321c Manikandan K Pillai 2025-04-11 306 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r) (0x1010 + ((r) & 0x1f) * 0x0080)
fc9e872310321c Manikandan K Pillai 2025-04-11 307 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
fc9e872310321c Manikandan K Pillai 2025-04-11 308 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
fc9e872310321c Manikandan K Pillai 2025-04-11 @309 FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK, \
fc9e872310321c Manikandan K Pillai 2025-04-11 310 ((nbits) - 1))
fc9e872310321c Manikandan K Pillai 2025-04-11 311 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(23, 16)
fc9e872310321c Manikandan K Pillai 2025-04-11 312 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
fc9e872310321c Manikandan K Pillai 2025-04-11 313 FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK, devfn)
fc9e872310321c Manikandan K Pillai 2025-04-11 314 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(31, 24)
fc9e872310321c Manikandan K Pillai 2025-04-11 315 #define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
fc9e872310321c Manikandan K Pillai 2025-04-11 316 FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK, bus)
fc9e872310321c Manikandan K Pillai 2025-04-11 317
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
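The gcc report adds two problems in pcie-cadence-plat.c: the ops table
references cdns_pcie_hpa_startlink while the function added in pcie-cadence.c
and declared in the header is cdns_pcie_hpa_start_link, and cdns_hpa_plat_ops
itself is never wired up to any probe path yet, hence the
unused-const-variable warning. A sketch of the spelling fix (hooking the table
up to a compatible entry is a separate matter):

    static const struct cdns_pcie_ops cdns_hpa_plat_ops = {
            .start_link = cdns_pcie_hpa_start_link,   /* was: cdns_pcie_hpa_startlink */
            .stop_link  = cdns_pcie_hpa_stop_link,
            .link_up    = cdns_pcie_hpa_linkup,
            /* remaining callbacks as in the patch */
    };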
On Fri, Apr 11, 2025 at 06:36:55PM +0800, hans.zhang@cixtech.com wrote:
> From: Manikandan K Pillai <mpillai@cadence.com>
>
> Add support for the Cadence PCIe HPA controller by adding the
> required callback functions. Update the common RP and EP
> configuration code to dispatch through these callbacks, and select
> the appropriate callback set when the platform driver probes the
> controller.
>
> Signed-off-by: Manikandan K Pillai <mpillai@cadence.com>
> Co-developed-by: Hans Zhang <hans.zhang@cixtech.com>
> Signed-off-by: Hans Zhang <hans.zhang@cixtech.com>
> ---
> .../pci/controller/cadence/pcie-cadence-ep.c | 29 +-
> .../controller/cadence/pcie-cadence-host.c | 271 ++++++++++++++++--
> .../controller/cadence/pcie-cadence-plat.c | 23 ++
> drivers/pci/controller/cadence/pcie-cadence.c | 196 ++++++++++++-
> drivers/pci/controller/cadence/pcie-cadence.h | 12 +
> 5 files changed, 488 insertions(+), 43 deletions(-)
>
> diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
> index f3f956fa116b..f4961c760434 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
> +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
> @@ -192,7 +192,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
> }
>
> fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
> - cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
> + pcie->ops->set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
>
> set_bit(r, &ep->ob_region_map);
> ep->ob_addr[r] = addr;
> @@ -214,7 +214,7 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
> if (r == ep->max_regions - 1)
> return;
>
> - cdns_pcie_reset_outbound_region(pcie, r);
> + pcie->ops->reset_outbound_region(pcie, r);
>
> ep->ob_addr[r] = 0;
> clear_bit(r, &ep->ob_region_map);
> @@ -329,8 +329,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
> if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
> ep->irq_pci_fn != fn)) {
> /* First region was reserved for IRQ writes. */
> - cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
> - ep->irq_phys_addr);
> + pcie->ops->set_outbound_region_for_normal_msg(pcie, 0, fn, 0, ep->irq_phys_addr);
> ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
> ep->irq_pci_fn = fn;
> }
> @@ -411,11 +410,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
> if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
> ep->irq_pci_fn != fn)) {
> /* First region was reserved for IRQ writes. */
> - cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
> - false,
> - ep->irq_phys_addr,
> - pci_addr & ~pci_addr_mask,
> - pci_addr_mask + 1);
> + pcie->ops->set_outbound_region(pcie, 0, fn, 0,
> + false,
> + ep->irq_phys_addr,
> + pci_addr & ~pci_addr_mask,
> + pci_addr_mask + 1);
> ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
> ep->irq_pci_fn = fn;
> }
> @@ -514,11 +513,11 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
> if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
> ep->irq_pci_fn != fn) {
> /* First region was reserved for IRQ writes. */
> - cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
> - false,
> - ep->irq_phys_addr,
> - msg_addr & ~pci_addr_mask,
> - pci_addr_mask + 1);
> + pcie->ops->set_outbound_region(pcie, 0, fn, 0,
> + false,
> + ep->irq_phys_addr,
> + msg_addr & ~pci_addr_mask,
> + pci_addr_mask + 1);
> ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
> ep->irq_pci_fn = fn;
> }
> @@ -869,7 +868,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
> set_bit(0, &ep->ob_region_map);
>
> if (ep->quirk_detect_quiet_flag)
> - cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
> + pcie->ops->detect_quiet_min_delay_set(&ep->pcie);
>
> spin_lock_init(&ep->lock);
>
> diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
> index ce035eef0a5c..c7066ea3b9e8 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence-host.c
> +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
> @@ -60,10 +60,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
> /* Configuration Type 0 or Type 1 access. */
> desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
> CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
> - /*
> - * The bus number was already set once for all in desc1 by
> - * cdns_pcie_host_init_address_translation().
> - */
> +
> if (busn == bridge->busnr + 1)
> desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
> else
> @@ -73,12 +70,81 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
> return rc->cfg_base + (where & 0xfff);
> }
>
> +void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
> + int where)
> +{
> + struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
> + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
> + struct cdns_pcie *pcie = &rc->pcie;
> + unsigned int busn = bus->number;
> + u32 addr0, desc0, desc1, ctrl0;
> + u32 regval;
> +
> + if (pci_is_root_bus(bus)) {
> + /*
> + * Only the root port (devfn == 0) is connected to this bus.
> + * All other PCI devices are behind some bridge hence on another
> + * bus.
> + */
> + if (devfn)
> + return NULL;
> +
> + return pcie->reg_base + (where & 0xfff);
> + }
> +
> + /*
> + * Clear AXI link-down status
> + */
That is an odd thing to do in map_bus. Also, it is completely racy
because...
> + regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
> + (regval & GENMASK(0, 0)));
> +
What if the link goes down again here?
> + desc1 = 0;
> + ctrl0 = 0;
> +
> + /*
> + * Update Output registers for AXI region 0.
> + */
> + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);
> +
> + desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
> + desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
> + desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
> + ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
> + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
> +
> + if (busn == bridge->busnr + 1)
> + desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
> + else
> + desc0 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
> +
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);
This is also racy with the read and write functions. Don't worry, lots
of other broken h/w like this...
Surely this new h/w supports ECAM style config accesses? If so, use
and support that mode instead.
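For reference, an ECAM-backed variant would drop the dynamic region-0
reprogramming from map_bus entirely and compute the config offset directly.
The following is illustrative only: it assumes the HPA controller can expose a
flat ECAM-compatible window (the open question above) and that rc->cfg_base
covers the whole bus range; root-bus handling and window sizing are omitted.

    /* needs #include <linux/pci-ecam.h> for PCIE_ECAM_OFFSET() */
    static void __iomem *cdns_pci_hpa_ecam_map_bus(struct pci_bus *bus,
                                                   unsigned int devfn, int where)
    {
            struct cdns_pcie_rc *rc = pci_host_bridge_priv(pci_find_host_bridge(bus));

            /* bus:dev:fn:reg laid out ECAM-style inside the config window */
            return rc->cfg_base + PCIE_ECAM_OFFSET(bus->number, PCI_SLOT(devfn),
                                                   PCI_FUNC(devfn), where);
    }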
> +
> + return rc->cfg_base + (where & 0xfff);
> +}
> +
> static struct pci_ops cdns_pcie_host_ops = {
> .map_bus = cdns_pci_map_bus,
> .read = pci_generic_config_read,
> .write = pci_generic_config_write,
> };
>
> +static struct pci_ops cdns_pcie_hpa_host_ops = {
> + .map_bus = cdns_pci_hpa_map_bus,
> + .read = pci_generic_config_read,
> + .write = pci_generic_config_write,
> +};
> +
> static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
> {
> u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
> @@ -154,8 +220,14 @@ static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
> {
> u32 val;
>
> - val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
> - cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
> + if (!pcie->is_hpa) {
> + val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
> + cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
> + } else {
> + val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL,
> + val | CDNS_PCIE_HPA_LM_TPM_CTRL_PTMRSEN);
> + }
> }
>
> static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
> @@ -340,8 +412,8 @@ static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
> */
> bar = cdns_pcie_host_find_min_bar(rc, size);
> if (bar != RP_BAR_UNDEFINED) {
> - ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
> - size, flags);
> + ret = pcie->ops->host_bar_ib_config(rc, bar, cpu_addr,
> + size, flags);
> if (ret)
> dev_err(dev, "IB BAR: %d config failed\n", bar);
> return ret;
> @@ -366,8 +438,7 @@ static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
> }
>
> winsize = bar_max_size[bar];
> - ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
> - flags);
> + ret = pcie->ops->host_bar_ib_config(rc, bar, cpu_addr, winsize, flags);
> if (ret) {
> dev_err(dev, "IB BAR: %d config failed\n", bar);
> return ret;
> @@ -408,8 +479,8 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
> if (list_empty(&bridge->dma_ranges)) {
> of_property_read_u32(np, "cdns,no-bar-match-nbits",
> &no_bar_nbits);
> - err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
> - (u64)1 << no_bar_nbits, 0);
> + err = pcie->ops->host_bar_ib_config(rc, RP_NO_BAR, 0x0,
> + (u64)1 << no_bar_nbits, 0);
> if (err)
> dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
> return err;
> @@ -467,17 +538,159 @@ int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
> u64 pci_addr = res->start - entry->offset;
>
> if (resource_type(res) == IORESOURCE_IO)
> - cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
> - true,
> - pci_pio_to_address(res->start),
> - pci_addr,
> - resource_size(res));
> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> + true,
> + pci_pio_to_address(res->start),
> + pci_addr,
> + resource_size(res));
> + else
> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> + false,
> + res->start,
> + pci_addr,
> + resource_size(res));
> +
> + r++;
> + }
> +
> + return cdns_pcie_host_map_dma_ranges(rc);
> +}
> +
> +int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc)
> +{
> + struct cdns_pcie *pcie = &rc->pcie;
> + u32 value, ctrl;
> +
> + /*
> + * Set the root complex BAR configuration register:
> + * - disable both BAR0 and BAR1.
> + * - enable Prefetchable Memory Base and Limit registers in type 1
> + * config space (64 bits).
> + * - enable IO Base and Limit registers in type 1 config
> + * space (32 bits).
> + */
> +
> + ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED;
> + value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE |
> + CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS;
> + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
> + CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
> +
> + if (rc->vendor_id != 0xffff)
> + cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
> +
> + if (rc->device_id != 0xffff)
> + cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
> +
> + cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
> + cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
> + cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
> +
> + return 0;
> +}
> +
> +int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
> + enum cdns_pcie_rp_bar bar,
> + u64 cpu_addr, u64 size,
> + unsigned long flags)
> +{
> + struct cdns_pcie *pcie = &rc->pcie;
> + u32 addr0, addr1, aperture, value;
> +
> + if (!rc->avail_ib_bar[bar])
> + return -EBUSY;
> +
> + rc->avail_ib_bar[bar] = false;
> +
> + aperture = ilog2(size);
> + addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
> + (lower_32_bits(cpu_addr) & GENMASK(31, 8));
> + addr1 = upper_32_bits(cpu_addr);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
> + CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
> + CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1);
> +
> + if (bar == RP_NO_BAR)
> + return 0;
> +
> + value = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG);
> + value &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
> + HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
> + HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
> + HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
> + HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
> + if (size + cpu_addr >= SZ_4G) {
> + if (!(flags & IORESOURCE_PREFETCH))
> + value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
> + value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
> + } else {
> + if (!(flags & IORESOURCE_PREFETCH))
> + value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
> + value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
> + }
> +
> + value |= HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
> +
> + return 0;
> +}
> +
> +int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc)
> +{
> + struct cdns_pcie *pcie = &rc->pcie;
> + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
> + struct resource *cfg_res = rc->cfg_res;
> + struct resource_entry *entry;
> + u64 cpu_addr = cfg_res->start;
> + u32 addr0, addr1, desc1;
> + int r, busnr = 0;
> +
> + entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
> + if (entry)
> + busnr = entry->res->start;
> +
> + /*
> + * Reserve region 0 for PCI configure space accesses:
> + * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
> + * cdns_pci_map_bus(), other region registers are set here once for all.
> + */
> + addr1 = 0;
> + desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), addr1);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
> +
> + addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
> + (lower_32_bits(cpu_addr) & GENMASK(31, 8));
> + addr1 = upper_32_bits(cpu_addr);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0);
> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
> + CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1);
> +
> + r = 1;
> + resource_list_for_each_entry(entry, &bridge->windows) {
> + struct resource *res = entry->res;
> + u64 pci_addr = res->start - entry->offset;
> +
> + if (resource_type(res) == IORESOURCE_IO)
> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> + true,
> + pci_pio_to_address(res->start),
> + pci_addr,
> + resource_size(res));
> else
> - cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
> - false,
> - res->start,
> - pci_addr,
> - resource_size(res));
> + pcie->ops->set_outbound_region(pcie, busnr, 0, r,
> + false,
> + res->start,
> + pci_addr,
> + resource_size(res));
>
> r++;
> }
> @@ -489,11 +702,11 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
> {
> int err;
>
> - err = cdns_pcie_host_init_root_port(rc);
> + err = rc->pcie.ops->host_init_root_port(rc);
> if (err)
> return err;
>
> - return cdns_pcie_host_init_address_translation(rc);
> + return rc->pcie.ops->host_init_address_translation(rc);
> }
>
> int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
> @@ -503,7 +716,7 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
> int ret;
>
> if (rc->quirk_detect_quiet_flag)
> - cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
> + pcie->ops->detect_quiet_min_delay_set(&rc->pcie);
>
> cdns_pcie_host_enable_ptm_response(pcie);
>
> @@ -566,8 +779,12 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
> if (ret)
> return ret;
>
> - if (!bridge->ops)
> - bridge->ops = &cdns_pcie_host_ops;
> + if (!bridge->ops) {
> + if (pcie->is_hpa)
> + bridge->ops = &cdns_pcie_hpa_host_ops;
> + else
> + bridge->ops = &cdns_pcie_host_ops;
> + }
>
> ret = pci_host_probe(bridge);
> if (ret < 0)
> diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
> index b24176d4df1f..8d5fbaef0a3c 100644
> --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
> +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
> @@ -43,7 +43,30 @@ static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr)
> }
>
> static const struct cdns_pcie_ops cdns_plat_ops = {
> + .link_up = cdns_pcie_linkup,
> .cpu_addr_fixup = cdns_plat_cpu_addr_fixup,
> + .host_init_root_port = cdns_pcie_host_init_root_port,
> + .host_bar_ib_config = cdns_pcie_host_bar_ib_config,
> + .host_init_address_translation = cdns_pcie_host_init_address_translation,
> + .detect_quiet_min_delay_set = cdns_pcie_detect_quiet_min_delay_set,
> + .set_outbound_region = cdns_pcie_set_outbound_region,
> + .set_outbound_region_for_normal_msg =
> + cdns_pcie_set_outbound_region_for_normal_msg,
> + .reset_outbound_region = cdns_pcie_reset_outbound_region,
> +};
> +
> +static const struct cdns_pcie_ops cdns_hpa_plat_ops = {
> + .start_link = cdns_pcie_hpa_startlink,
> + .stop_link = cdns_pcie_hpa_stop_link,
> + .link_up = cdns_pcie_hpa_linkup,
> + .host_init_root_port = cdns_pcie_hpa_host_init_root_port,
> + .host_bar_ib_config = cdns_pcie_hpa_host_bar_ib_config,
> + .host_init_address_translation = cdns_pcie_hpa_host_init_address_translation,
> + .detect_quiet_min_delay_set = cdns_pcie_hpa_detect_quiet_min_delay_set,
> + .set_outbound_region = cdns_pcie_hpa_set_outbound_region,
> + .set_outbound_region_for_normal_msg =
> + cdns_pcie_hpa_set_outbound_region_for_normal_msg,
> + .reset_outbound_region = cdns_pcie_hpa_reset_outbound_region,
What exactly is shared between these 2 implementations? Link handling,
config space accesses, address translation, and host init are all
different. What's left to share? MSIs (if not passed thru) and
interrupts? I think it's questionable that this be the same driver.
A bunch of driver specific 'ops' is not the right direction despite
other drivers (DWC) having that. If there are common parts, then make
them library functions multiple drivers can call.
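
For illustration only (the wrapper below is made up; cdns_pcie_hpa_set_outbound_region()
is one of the helpers this series adds), the library-function direction would
look roughly like:

/*
 * Sketch only: an HPA-only platform driver calls the HPA helpers directly
 * as library functions instead of going through pcie->ops.
 * my_hpa_map_mem_window() is a hypothetical caller.
 */
static void my_hpa_map_mem_window(struct cdns_pcie *pcie, u8 busnr, u32 r,
				  struct resource *res, u64 pci_addr)
{
	cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r, false,
					  res->start, pci_addr,
					  resource_size(res));
}
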
Rob
On 2025/4/12 04:24, Rob Herring wrote:
>> +void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
>> + int where)
>> +{
>> + struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
>> + struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
>> + struct cdns_pcie *pcie = &rc->pcie;
>> + unsigned int busn = bus->number;
>> + u32 addr0, desc0, desc1, ctrl0;
>> + u32 regval;
>> +
>> + if (pci_is_root_bus(bus)) {
>> + /*
>> + * Only the root port (devfn == 0) is connected to this bus.
>> + * All other PCI devices are behind some bridge hence on another
>> + * bus.
>> + */
>> + if (devfn)
>> + return NULL;
>> +
>> + return pcie->reg_base + (where & 0xfff);
>> + }
>> +
>> + /*
>> + * Clear AXI link-down status
>> + */
>
> That is an odd thing to do in map_bus. Also, it is completely racy
> because...
>
>> + regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
>> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
>> + (regval & GENMASK(0, 0)));
>> +
>
> What if the link goes down again here.
>
Hi Rob,
Thanks for your reply. Compared with the Synopsys PCIe IP, the Cadence PCIe
IP has one extra register, CDNS_PCIE_HPA_AT_LINKDOWN. When the PCIe link goes
down, bit0 of CDNS_PCIE_HPA_AT_LINKDOWN is set to 1 and ECAM can no longer
access config space. Bit0 has to be cleared before config accesses can
continue.
In my opinion, this is a place where the Cadence PCIe IP does not make sense.
As Cadence users we had no choice, and the chip had already gone to silicon.
Therefore, for the second-generation SoC we will add an SPI interrupt: when a
link down occurs, the SPI interrupt fires and its handler clears
CDNS_PCIE_HPA_AT_LINKDOWN bit0, roughly as in the sketch below.
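
A minimal sketch of such a handler (hypothetical: the SPI does not exist on
current silicon, and the handler and IRQ names are made up; only
cdns_pcie_hpa_writel(), REG_BANK_AXI_SLAVE and CDNS_PCIE_HPA_AT_LINKDOWN come
from this series, and writing 0 mirrors what the first-generation driver does):

#include <linux/interrupt.h>

static irqreturn_t cdns_pcie_hpa_linkdown_irq(int irq, void *dev_id)
{
	struct cdns_pcie *pcie = dev_id;

	/* Clear the sticky AXI link-down flag so ECAM accesses work again */
	cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
			     CDNS_PCIE_HPA_AT_LINKDOWN, 0x0);

	return IRQ_HANDLED;
}

/* registered at probe time, e.g.:
 * devm_request_irq(dev, irq, cdns_pcie_hpa_linkdown_irq, 0,
 *		    "cdns-pcie-linkdown", pcie);
 */
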
If there are other reasons, Manikandan, please add them.
In addition, ECAM is not accessible while the link is down. For example, if
the RP is put into hot reset, the hot reset cannot be de-asserted through
ECAM; the APB interface has to be used to de-assert it. We reported this RTL
bug to Cadence; we cannot fix it in our first- or second-generation chips, and
Cadence has not released an RTL patch to us so far.
This software workaround will also show up later in the Cixtech PCIe
controller patch series.
Best regards,
Hans
On 2025/4/12 23:45, Hans Zhang wrote:
>>> + /*
>>> + * Clear AXI link-down status
>>> + */
>>
>> That is an odd thing to do in map_bus. Also, it is completely racy
>> because...
>>
>>> + regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
>>> + cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
>>> + (regval & GENMASK(0, 0)));
>>> +
>>
>> What if the link goes down again here.
>>
>
> Hi Rob,
>
> Thanks for your reply. Compared with the Synopsys PCIe IP, the Cadence
> PCIe IP has one extra register, CDNS_PCIE_HPA_AT_LINKDOWN. When the PCIe
> link goes down, bit0 of CDNS_PCIE_HPA_AT_LINKDOWN is set to 1 and ECAM can
> no longer access config space. Bit0 has to be cleared before config
> accesses can continue.
>
> In my opinion, this is a place where the Cadence PCIe IP does not make
> sense. As Cadence users we had no choice, and the chip had already gone to
> silicon.
Supplement: prior to this patch series, the first-generation Cadence PCIe IP
already has the following code in the current Linux master branch:
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	......
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
	......
}
It seems that all Cadence PCIe IPs have this problem.