Whereas most PCIe HW returns 0xffffffff on illegal accesses and the like,
by default Broadcom's STB PCIe controller effects an abort. Some SoCs --
7216 and its descendants -- have new HW that identifies error details.
This simple handler determines if the PCIe controller was the cause of the
abort and if so, prints out diagnostic info. Unfortunately, an abort still
occurs.
Care is taken to read the error registers only when the PCIe bridge is
active and the PCIe registers are acceptable. Otherwise, a "die" event
caused by something other than the PCIe could cause an abort if the PCIe
"die" handler tried to access registers when the bridge is off.
Example error output:
brcm-pcie 8b20000.pcie: Error: Mem Acc: 32bit, Read, @0x38000000
brcm-pcie 8b20000.pcie: Type: TO=0 Abt=0 UnspReq=1 AccDsble=0 BadAddr=0
Signed-off-by: Jim Quinlan <james.quinlan@broadcom.com>
---
drivers/pci/controller/pcie-brcmstb.c | 155 +++++++++++++++++++++++++-
1 file changed, 154 insertions(+), 1 deletion(-)
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 400854c893d8..abc56acad1fe 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -13,15 +13,18 @@
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
@@ -151,6 +154,39 @@
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
+/* Error report registers */
+#define PCIE_OUTB_ERR_TREAT 0x6000
+#define PCIE_OUTB_ERR_TREAT_CONFIG_MASK 0x1
+#define PCIE_OUTB_ERR_TREAT_MEM_MASK 0x2
+#define PCIE_OUTB_ERR_VALID 0x6004
+#define PCIE_OUTB_ERR_CLEAR 0x6008
+#define PCIE_OUTB_ERR_ACC_INFO 0x600c
+#define PCIE_OUTB_ERR_ACC_INFO_CFG_ERR_MASK 0x01
+#define PCIE_OUTB_ERR_ACC_INFO_MEM_ERR_MASK 0x02
+#define PCIE_OUTB_ERR_ACC_INFO_TYPE_64_MASK 0x04
+#define PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE_MASK 0x10
+#define PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES_MASK 0xff00
+#define PCIE_OUTB_ERR_ACC_ADDR 0x6010
+#define PCIE_OUTB_ERR_ACC_ADDR_BUS_MASK 0xff00000
+#define PCIE_OUTB_ERR_ACC_ADDR_DEV_MASK 0xf8000
+#define PCIE_OUTB_ERR_ACC_ADDR_FUNC_MASK 0x7000
+#define PCIE_OUTB_ERR_ACC_ADDR_REG_MASK 0xfff
+#define PCIE_OUTB_ERR_CFG_CAUSE 0x6014
+#define PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT_MASK 0x40
+#define PCIE_OUTB_ERR_CFG_CAUSE_ABORT_MASK 0x20
+#define PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ_MASK 0x10
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT_MASK 0x4
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED_MASK 0x2
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT__MASK 0x1
+#define PCIE_OUTB_ERR_MEM_ADDR_LO 0x6018
+#define PCIE_OUTB_ERR_MEM_ADDR_HI 0x601c
+#define PCIE_OUTB_ERR_MEM_CAUSE 0x6020
+#define PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT_MASK 0x40
+#define PCIE_OUTB_ERR_MEM_CAUSE_ABORT_MASK 0x20
+#define PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ_MASK 0x10
+#define PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED_MASK 0x2
+#define PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR_MASK 0x1
+
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
@@ -301,6 +337,8 @@ struct brcm_pcie {
struct subdev_regulators *sr;
bool ep_wakeup_capable;
const struct pcie_cfg_data *cfg;
+ struct notifier_block die_notifier;
+ struct notifier_block panic_notifier;
bool bridge_on;
spinlock_t bridge_lock;
};
@@ -1711,6 +1749,115 @@ static int brcm_pcie_resume_noirq(struct device *dev)
return ret;
}
+/* Dump out PCIe errors on die or panic */
+static int _brcm_pcie_dump_err(struct brcm_pcie *pcie,
+ const char *type)
+{
+ void __iomem *base = pcie->base;
+ int i, is_cfg_err, is_mem_err, lanes;
+ char *width_str, *direction_str, lanes_str[9];
+ u32 info, cfg_addr, cfg_cause, mem_cause, lo, hi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcie->bridge_lock, flags);
+ /* Don't access registers when the bridge is off */
+ if (!pcie->bridge_on || readl(base + PCIE_OUTB_ERR_VALID) == 0) {
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+ return NOTIFY_DONE;
+ }
+
+ /* Read all necessary registers so we can release the spinlock ASAP */
+ info = readl(base + PCIE_OUTB_ERR_ACC_INFO);
+ is_cfg_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_CFG_ERR_MASK);
+ is_mem_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_MEM_ERR_MASK);
+ if (is_cfg_err) {
+ cfg_addr = readl(base + PCIE_OUTB_ERR_ACC_ADDR);
+ cfg_cause = readl(base + PCIE_OUTB_ERR_CFG_CAUSE);
+ }
+ if (is_mem_err) {
+ mem_cause = readl(base + PCIE_OUTB_ERR_MEM_CAUSE);
+ lo = readl(base + PCIE_OUTB_ERR_MEM_ADDR_LO);
+ hi = readl(base + PCIE_OUTB_ERR_MEM_ADDR_HI);
+ }
+ /* We've got all of the info, clear the error */
+ writel(1, base + PCIE_OUTB_ERR_CLEAR);
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+
+ dev_err(pcie->dev, "handling %s error notification\n", type);
+ width_str = (info & PCIE_OUTB_ERR_ACC_INFO_TYPE_64_MASK) ? "64bit" : "32bit";
+ direction_str = (info & PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE_MASK) ? "Write" : "Read";
+ lanes = FIELD_GET(PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES_MASK, info);
+ for (i = 0, lanes_str[8] = 0; i < 8; i++)
+ lanes_str[i] = (lanes & (1 << i)) ? '1' : '0';
+
+ if (is_cfg_err) {
+ int bus = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_BUS_MASK, cfg_addr);
+ int dev = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_DEV_MASK, cfg_addr);
+ int func = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_FUNC_MASK, cfg_addr);
+ int reg = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_REG_MASK, cfg_addr);
+
+ dev_err(pcie->dev, "Error: CFG Acc, %s, %s, Bus=%d, Dev=%d, Fun=%d, Reg=0x%x, lanes=%s\n",
+ width_str, direction_str, bus, dev, func, reg, lanes_str);
+ dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccTO=%d AccDsbld=%d Acc64bit=%d\n",
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT_MASK),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ABORT_MASK),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ_MASK),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT_MASK),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED_MASK),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT__MASK));
+ }
+
+ if (is_mem_err) {
+ u64 addr = ((u64)hi << 32) | (u64)lo;
+
+ dev_err(pcie->dev, "Error: Mem Acc, %s, %s, @0x%llx, lanes=%s\n",
+ width_str, direction_str, addr, lanes_str);
+ dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccDsble=%d BadAddr=%d\n",
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT_MASK),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ABORT_MASK),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ_MASK),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED_MASK),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR_MASK));
+ }
+
+ return NOTIFY_OK;
+}
+
+static int brcm_pcie_die_notify_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ struct brcm_pcie *pcie =
+ container_of(self, struct brcm_pcie, die_notifier);
+
+ return _brcm_pcie_dump_err(pcie, "Die");
+}
+
+static int brcm_pcie_panic_notify_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ struct brcm_pcie *pcie =
+ container_of(self, struct brcm_pcie, panic_notifier);
+
+ return _brcm_pcie_dump_err(pcie, "Panic");
+}
+
+static void brcm_register_die_notifiers(struct brcm_pcie *pcie)
+{
+ pcie->panic_notifier.notifier_call = brcm_pcie_panic_notify_cb;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pcie->panic_notifier);
+
+ pcie->die_notifier.notifier_call = brcm_pcie_die_notify_cb;
+ register_die_notifier(&pcie->die_notifier);
+}
+
+static void brcm_unregister_die_notifiers(struct brcm_pcie *pcie)
+{
+ unregister_die_notifier(&pcie->die_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &pcie->panic_notifier);
+}
+
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
@@ -1729,6 +1876,9 @@ static void brcm_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
+ if (pcie->cfg->has_err_report)
+ brcm_unregister_die_notifiers(pcie);
+
__brcm_pcie_remove(pcie);
}
@@ -1829,6 +1979,7 @@ static const struct pcie_cfg_data bcm7216_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
.has_phy = true,
.num_inbound_wins = 3,
+ .has_err_report = true,
};
static const struct pcie_cfg_data bcm7712_cfg = {
@@ -2003,8 +2154,10 @@ static int brcm_pcie_probe(struct platform_device *pdev)
return ret;
}
- if (pcie->cfg->has_err_report)
+ if (pcie->cfg->has_err_report) {
spin_lock_init(&pcie->bridge_lock);
+ brcm_register_die_notifiers(pcie);
+ }
return 0;
--
2.34.1

On Fri, Jun 13, 2025 at 06:08:43PM -0400, Jim Quinlan wrote:
> Care is taken to read the error registers only when the PCIe bridge is
> active and the PCIe registers are acceptable. Otherwise, a "die" event
> caused by something other than the PCIe could cause an abort if the PCIe
> "die" handler tried to access registers when the bridge is off.

s/acceptable/accessible/ ?

> Example error output:
> brcm-pcie 8b20000.pcie: Error: Mem Acc: 32bit, Read, @0x38000000
> brcm-pcie 8b20000.pcie: Type: TO=0 Abt=0 UnspReq=1 AccDsble=0 BadAddr=0

Ugly that we have to do this at all, but since I guess it's the best we
can do, looks ok to me.

> +/* Error report registers */
> +#define PCIE_OUTB_ERR_TREAT 0x6000
> +#define PCIE_OUTB_ERR_TREAT_CONFIG_MASK 0x1
> +#define PCIE_OUTB_ERR_TREAT_MEM_MASK 0x2
> +#define PCIE_OUTB_ERR_VALID 0x6004
> +#define PCIE_OUTB_ERR_CLEAR 0x6008
> +#define PCIE_OUTB_ERR_ACC_INFO 0x600c
> +#define PCIE_OUTB_ERR_ACC_INFO_CFG_ERR_MASK 0x01
> +#define PCIE_OUTB_ERR_ACC_INFO_MEM_ERR_MASK 0x02
> +#define PCIE_OUTB_ERR_ACC_INFO_TYPE_64_MASK 0x04
> +#define PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE_MASK 0x10

Including "MASK" in these names seems kind of pointless since they're
all single bits. Some drivers don't bother with "MASK" even for the
multi-bit fields, since uses read pretty naturally without it. But I
suppose this is following the existing brcmstb style.

> +static int _brcm_pcie_dump_err(struct brcm_pcie *pcie,
> +			       const char *type)

Fits on one line.
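
As a rough illustration of the naming point above (not part of the patch;
the shortened single-bit name is hypothetical), FIELD_GET() from
<linux/bitfield.h> genuinely needs the mask macro for a multi-bit field,
while a single-bit flag reads naturally in a plain test without any
_MASK suffix. The bit values below match the ones in the patch:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

/* Multi-bit field: FIELD_GET() masks and shifts using the mask macro */
#define ERR_ACC_INFO_BYTE_LANES_MASK	GENMASK(15, 8)
/* Single-bit flag: a plain test reads fine without a _MASK suffix */
#define ERR_ACC_INFO_DIR_WRITE		BIT(4)

static void decode_acc_info(u32 info)
{
	unsigned int lanes = FIELD_GET(ERR_ACC_INFO_BYTE_LANES_MASK, info);

	pr_info("lanes=%#x dir=%s\n", lanes,
		(info & ERR_ACC_INFO_DIR_WRITE) ? "Write" : "Read");
}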

On Fri, Jun 13, 2025 at 06:08:43PM -0400, Jim Quinlan wrote:
> Whereas most PCIe HW returns 0xffffffff on illegal accesses and the like,
> by default Broadcom's STB PCIe controller effects an abort. Some SoCs --
> 7216 and its descendants -- have new HW that identifies error details.

What's the long term plan for this? This abort is a huge problem that
we're seeing across arm64 platforms. Forcing a panic and reboot for
every uncorrectable error is pretty hard to deal with.

Is there a plan to someday recover from these aborts? Or change the
hardware so it can at least be configured to return ~0 data after
logging the error in the hardware registers?

> This simple handler determines if the PCIe controller was the cause of the
> abort and if so, prints out diagnostic info. Unfortunately, an abort still
> occurs.
>
> Care is taken to read the error registers only when the PCIe bridge is
> active and the PCIe registers are acceptable. Otherwise, a "die" event
> caused by something other than the PCIe could cause an abort if the PCIe
> "die" handler tried to access registers when the bridge is off.

Checking whether the bridge is active is a "mostly-works" situation
since it's always racy.

> Example error output:
> brcm-pcie 8b20000.pcie: Error: Mem Acc: 32bit, Read, @0x38000000
> brcm-pcie 8b20000.pcie: Type: TO=0 Abt=0 UnspReq=1 AccDsble=0 BadAddr=0

On Wed, Aug 6, 2025 at 2:15 PM Bjorn Helgaas <helgaas@kernel.org> wrote:
>
> On Fri, Jun 13, 2025 at 06:08:43PM -0400, Jim Quinlan wrote:
> > Whereas most PCIe HW returns 0xffffffff on illegal accesses and the like,
> > by default Broadcom's STB PCIe controller effects an abort. Some SoCs --
> > 7216 and its descendants -- have new HW that identifies error details.
>
> What's the long term plan for this? This abort is a huge problem that
> we're seeing across arm64 platforms. Forcing a panic and reboot for
> every uncorrectable error is pretty hard to deal with.

Hello Bjorn,

Are you referring to STB/CM systems, Rpi, or something else altogether?

> Is there a plan to someday recover from these aborts? Or change the
> hardware so it can at least be configured to return ~0 data after
> logging the error in the hardware registers?

Some of our upcoming chips will have the ability to do nothing on
errant PCIe writes and return 0xffffffff on errant PCIe reads. But
none of our STB/CM chips do this currently. I've been asking for this
behavior for years but I have limited influence on what happens in HW.

> > This simple handler determines if the PCIe controller was the cause of the
> > abort and if so, prints out diagnostic info. Unfortunately, an abort still
> > occurs.
> >
> > Care is taken to read the error registers only when the PCIe bridge is
> > active and the PCIe registers are acceptable. Otherwise, a "die" event
> > caused by something other than the PCIe could cause an abort if the PCIe
> > "die" handler tried to access registers when the bridge is off.
>
> Checking whether the bridge is active is a "mostly-works" situation
> since it's always racy.

I'm not sure I understand the "racy" comment. If the PCIe bridge is
off, we do not read the PCIe error registers. In this case, PCIe is
probably not the cause of the panic. In the rare case the PCIe bridge
is off and it was the PCIe that caused the panic, nothing gets
reported, and this is where we are without this commit. Perhaps this
is what you mean by "mostly-works". But this is the best that can be
done with SW given our HW.

Regards,
Jim Quinlan
Broadcom STB/CM

On Wed, Aug 06, 2025 at 02:38:12PM -0400, Jim Quinlan wrote:
> On Wed, Aug 6, 2025 at 2:15 PM Bjorn Helgaas <helgaas@kernel.org> wrote:
> >
> > What's the long term plan for this? This abort is a huge problem that
> > we're seeing across arm64 platforms. Forcing a panic and reboot for
> > every uncorrectable error is pretty hard to deal with.
>
> Are you referring to STB/CM systems, Rpi, or something else altogether?

Just in general. I saw this recently with a Nuvoton NPCM8xx PCIe
controller. I'm not an arm64 guy, but I've been told that these aborts
are basically unrecoverable from a kernel perspective. For some reason
several PCIe controllers intended for arm64 seem to raise aborts on
PCIe errors. At the moment, that means we can't recover from errors
like surprise unplugs and other things that *should* be recoverable
(perhaps at the cost of resetting or disabling a PCIe device).

> > Is there a plan to someday recover from these aborts? Or change the
> > hardware so it can at least be configured to return ~0 data after
> > logging the error in the hardware registers?
>
> Some of our upcoming chips will have the ability to do nothing on
> errant PCIe writes and return 0xffffffff on errant PCIe reads. But
> none of our STB/CM chips do this currently. I've been asking for this
> behavior for years but I have limited influence on what happens in HW.

Fingers crossed for either that or some other way to make these things
recoverable.

> > > This simple handler determines if the PCIe controller was the
> > > cause of the abort and if so, prints out diagnostic info.
> > > Unfortunately, an abort still occurs.
> > >
> > > Care is taken to read the error registers only when the PCIe
> > > bridge is active and the PCIe registers are acceptable.
> > > Otherwise, a "die" event caused by something other than the PCIe
> > > could cause an abort if the PCIe "die" handler tried to access
> > > registers when the bridge is off.
> >
> > Checking whether the bridge is active is a "mostly-works"
> > situation since it's always racy.
>
> I'm not sure I understand the "racy" comment. If the PCIe bridge is
> off, we do not read the PCIe error registers. In this case, PCIe is
> probably not the cause of the panic. In the rare case the PCIe bridge
> is off and it was the PCIe that caused the panic, nothing gets
> reported, and this is where we are without this commit. Perhaps this
> is what you mean by "mostly-works". But this is the best that can be
> done with SW given our HW.

Right, my fault. The error report registers don't look like standard
PCIe things, so I suppose they are on the host side, not the PCIe
side, so they're probably guaranteed to be accessible and non-racy
unless the bridge is in reset.

Bjorn

On 8/6/25 11:50, Bjorn Helgaas wrote:
>> I'm not sure I understand the "racy" comment. If the PCIe bridge is
>> off, we do not read the PCIe error registers. In this case, PCIe is
>> probably not the cause of the panic. In the rare case the PCIe bridge
>> is off and it was the PCIe that caused the panic, nothing gets
>> reported, and this is where we are without this commit. Perhaps this
>> is what you mean by "mostly-works". But this is the best that can be
>> done with SW given our HW.
>
> Right, my fault. The error report registers don't look like standard
> PCIe things, so I suppose they are on the host side, not the PCIe
> side, so they're probably guaranteed to be accessible and non-racy
> unless the bridge is in reset.

To expand upon that part, the situation that I ran into was that we had
the PCIe link down and therefore clock gated the PCIe root complex
hardware to conserve power. Eventually I did hit a voluntary panic, and
since all panic notifiers registered are invoked in succession, the one
registered for the PCIe RC was invoked as well, and accessing clock
gated registers would not work and would trigger another fault, which
would be confusing and would mingle with the panic I was trying to
debug initially. Hence this check, and a clock gated PCIe RC would not
be logging any errors anyway.

--
Florian
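
A minimal sketch of the guard pattern Florian describes above, assuming
a hypothetical per-controller structure and register offset (this is not
the driver's actual code): a notifier is registered on the global
panic_notifier_list, and it bails out before any MMIO access whenever
the block is known to be clock gated or in reset.

#include <linux/container_of.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/printk.h>
#include <linux/spinlock.h>

/* Hypothetical per-controller state, mirroring bridge_on/bridge_lock */
struct my_rc {
	void __iomem *base;
	bool powered_on;		/* false while clock gated or in reset */
	spinlock_t lock;
	struct notifier_block panic_nb;
};

static int my_rc_panic_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct my_rc *rc = container_of(nb, struct my_rc, panic_nb);
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&rc->lock, flags);
	if (!rc->powered_on) {
		/* Never touch a gated block; let the other notifiers run */
		spin_unlock_irqrestore(&rc->lock, flags);
		return NOTIFY_DONE;
	}
	status = readl(rc->base + 0x6004);	/* e.g. an error-valid register */
	spin_unlock_irqrestore(&rc->lock, flags);

	if (status)
		pr_err("my_rc: latched error, status=%#x\n", status);

	return NOTIFY_OK;
}

static void my_rc_register_panic_notifier(struct my_rc *rc)
{
	rc->panic_nb.notifier_call = my_rc_panic_cb;
	atomic_notifier_chain_register(&panic_notifier_list, &rc->panic_nb);
}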

On Wed, Aug 06, 2025 at 01:41:35PM GMT, Florian Fainelli wrote:
> On 8/6/25 11:50, Bjorn Helgaas wrote:
> > Right, my fault. The error report registers don't look like standard
> > PCIe things, so I suppose they are on the host side, not the PCIe
> > side, so they're probably guaranteed to be accessible and non-racy
> > unless the bridge is in reset.
>
> To expand upon that part, the situation that I ran into was that we had
> the PCIe link down and therefore clock gated the PCIe root complex
> hardware to conserve power. Eventually I did hit a voluntary panic, and
> since all panic notifiers registered are invoked in succession, the one
> registered for the PCIe RC was invoked as well, and accessing clock
> gated registers would not work and would trigger another fault, which
> would be confusing and would mingle with the panic I was trying to
> debug initially. Hence this check, and a clock gated PCIe RC would not
> be logging any errors anyway.

May I ask how you are recovering from link down? Can the driver detect
link down using any platform IRQ?

- Mani

--
மணிவண்ணன் சதாசிவம்

On 8/6/25 22:26, Manivannan Sadhasivam wrote:
> On Wed, Aug 06, 2025 at 01:41:35PM GMT, Florian Fainelli wrote:
> > To expand upon that part, the situation that I ran into was that we
> > had the PCIe link down and therefore clock gated the PCIe root
> > complex hardware to conserve power.
>
> May I ask how you are recovering from link down? Can the driver detect
> link down using any platform IRQ?

Just to be clear, what I was describing here is not a link down
recovery. The point I was trying to convey is that we have multiple
busses in our system (DRAM, on-chip registers, PCIe) and each one of
them has its own way of reporting errors, so if we get a form of system
error/kernel panic we like to interrogate each one of them to figure
out the cause.

In the case I was describing, I was actually tracking down a bad DRAM
access, but the error reporting came from the on-chip register arbiter,
because prior to that we had been trying to read from the clock gated
PCIe bridge to see whether the PCIe bridge was responsible for the bad
access. This leads you to an incorrect source of the bad access, and so
that's why we guard the panic handler invocation within the PCIe root
complex with a check of whether the bridge is in reset or not.

If this is still not clear, let me know.

--
Florian

On Thu, Aug 7, 2025 at 1:26 AM Manivannan Sadhasivam <mani@kernel.org> wrote:
>
> May I ask how you are recovering from link down? Can the driver detect
> link down using any platform IRQ?

We do have link up/down interrupts on most of our SoCs but we once
implemented a handler and the interrupts were unreliable. We informed
HW but I do not think they implemented any changes. We will try again
at some point to ascertain the extent of the issue.

AFAICT such a handler is not a panacea. Having a link-down handler may
be able to immediately prevent panics for config space accesses by
intercepting them, but not incoming memory accesses from the host or
endpoint device.

Regards,
Jim Quinlan
Broadcom STB/CM
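
For the config-space half mentioned above, such interception could in
principle live in the controller's pci_ops read accessor, so the CPU
never issues the doomed access and the PCI core sees all-ones data
instead. This is a generic, hypothetical sketch, not the brcmstb
driver's actual accessor; my_rc_link_up() is an assumed helper for
however the platform learns the current link state:

#include <linux/pci.h>

/* Assumed helper: platform-specific link-state query */
bool my_rc_link_up(struct pci_bus *bus);

static int my_rc_config_read(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 *val)
{
	if (!my_rc_link_up(bus)) {
		/* Don't forward the access; fabricate the all-ones answer */
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Otherwise perform the normal mapped config read */
	return pci_generic_config_read(bus, devfn, where, size, val);
}

As Jim notes, this only helps CPU-initiated config accesses; memory-space
traffic is not covered by such a check.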

On Wed, Aug 6, 2025 at 2:50 PM Bjorn Helgaas <helgaas@kernel.org> wrote:
>
> On Wed, Aug 06, 2025 at 02:38:12PM -0400, Jim Quinlan wrote:
> > Are you referring to STB/CM systems, Rpi, or something else altogether?
>
> Just in general. I saw this recently with a Nuvoton NPCM8xx PCIe
> controller. I'm not an arm64 guy, but I've been told that these aborts
> are basically unrecoverable from a kernel perspective. For some reason
> several PCIe controllers intended for arm64 seem to raise aborts on
> PCIe errors. At the moment, that means we can't recover from errors
> like surprise unplugs and other things that *should* be recoverable
> (perhaps at the cost of resetting or disabling a PCIe device).

FWIW, our original RC controller was paired with MIPS, so it could be
that a number of non-x86 camps just went with the panic-y behavior. I
believe that the PCIe spec allows this rude behavior, or doesn't
specifically disallow it.

I also remember that there is an ARM standard initiative for ARM-based
systems that requires the PCIe error-gets-0xffffffff behavior. We
obviously don't conform.

At any rate, I will send an email now to the HW folks I know to remind
them that we need this behavior, at least as a configurable option.

Regards,
Jim Quinlan
Broadcom STB/CM

> Fingers crossed for either that or some other way to make these things
> recoverable.
>
> Right, my fault. The error report registers don't look like standard
> PCIe things, so I suppose they are on the host side, not the PCIe
> side, so they're probably guaranteed to be accessible and non-racy
> unless the bridge is in reset.
>
> Bjorn

On 6/13/25 15:08, Jim Quinlan wrote:
> Whereas most PCIe HW returns 0xffffffff on illegal accesses and the like,
> by default Broadcom's STB PCIe controller effects an abort. Some SoCs --
> 7216 and its descendants -- have new HW that identifies error details.
>
> This simple handler determines if the PCIe controller was the cause of the
> abort and if so, prints out diagnostic info. Unfortunately, an abort still
> occurs.
>
> Care is taken to read the error registers only when the PCIe bridge is
> active and the PCIe registers are acceptable. Otherwise, a "die" event
> caused by something other than the PCIe could cause an abort if the PCIe
> "die" handler tried to access registers when the bridge is off.
>
> Example error output:
> brcm-pcie 8b20000.pcie: Error: Mem Acc: 32bit, Read, @0x38000000
> brcm-pcie 8b20000.pcie: Type: TO=0 Abt=0 UnspReq=1 AccDsble=0 BadAddr=0
>
> Signed-off-by: Jim Quinlan <james.quinlan@broadcom.com>

Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>

--
Florian