The AER injection path may run in interrupt-disabled context while
holding inject_lock. On PREEMPT_RT kernels, spinlock_t becomes a
sleeping lock, so it must not be taken while a raw_spinlock_t is held.
Doing so violates lock ordering rules and triggers lockdep reports
such as “Invalid wait context”.
Convert inject_lock to raw_spinlock_t to ensure non-sleeping locking
semantics. The protected list is bounded and used only for debugging,
so raw locking will not cause latency issues.
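For illustration (not part of this patch), a minimal sketch of the
nesting that lockdep's wait-context checking rejects; the demo_* lock
names are hypothetical:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_outer);	/* spins on all kernels */
static DEFINE_SPINLOCK(demo_inner);	/* sleeping rt_mutex on PREEMPT_RT */

static void demo_bad_nesting(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_outer, flags);
	spin_lock(&demo_inner);	/* "Invalid wait context": may sleep under a raw lock */
	spin_unlock(&demo_inner);
	raw_spin_unlock_irqrestore(&demo_outer, flags);
}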
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Guangbo Cui <jckeep.cuiguangbo@gmail.com>
---
drivers/pci/pcie/aer_inject.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index d0cabfc04d48..a064fa2acb94 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -72,7 +72,7 @@ static LIST_HEAD(einjected);
static LIST_HEAD(pci_bus_ops_list);
/* Protect einjected and pci_bus_ops_list */
-static DEFINE_SPINLOCK(inject_lock);
+static DEFINE_RAW_SPINLOCK(inject_lock);
static void aer_error_init(struct aer_error *err, u32 domain,
unsigned int bus, unsigned int devfn,
@@ -126,12 +126,12 @@ static struct pci_bus_ops *pci_bus_ops_pop(void)
unsigned long flags;
struct pci_bus_ops *bus_ops;
- spin_lock_irqsave(&inject_lock, flags);
+ raw_spin_lock_irqsave(&inject_lock, flags);
bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
struct pci_bus_ops, list);
if (bus_ops)
list_del(&bus_ops->list);
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
return bus_ops;
}
@@ -223,7 +223,7 @@ static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
int domain;
int rv;
- spin_lock_irqsave(&inject_lock, flags);
+ raw_spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
goto out;
domain = pci_domain_nr(bus);
@@ -236,12 +236,12 @@ static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
sim = find_pci_config_dword(err, where, NULL);
if (sim) {
*val = *sim;
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
return 0;
}
out:
rv = aer_inj_read(bus, devfn, where, size, val);
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -255,7 +255,7 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
int domain;
int rv;
- spin_lock_irqsave(&inject_lock, flags);
+ raw_spin_lock_irqsave(&inject_lock, flags);
if (size != sizeof(u32))
goto out;
domain = pci_domain_nr(bus);
@@ -271,12 +271,12 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
*sim ^= val;
else
*sim = val;
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
return 0;
}
out:
rv = aer_inj_write(bus, devfn, where, size, val);
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -304,14 +304,14 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
if (!bus_ops)
return -ENOMEM;
ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
- spin_lock_irqsave(&inject_lock, flags);
+ raw_spin_lock_irqsave(&inject_lock, flags);
if (ops == &aer_inj_pci_ops)
goto out;
pci_bus_ops_init(bus_ops, bus, ops);
list_add(&bus_ops->list, &pci_bus_ops_list);
bus_ops = NULL;
out:
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
kfree(bus_ops);
return 0;
}
@@ -383,7 +383,7 @@ static int aer_inject(struct aer_error_inj *einj)
uncor_mask);
}
- spin_lock_irqsave(&inject_lock, flags);
+ raw_spin_lock_irqsave(&inject_lock, flags);
err = __find_aer_error_by_dev(dev);
if (!err) {
@@ -404,14 +404,14 @@ static int aer_inject(struct aer_error_inj *einj)
!(einj->cor_status & ~cor_mask)) {
ret = -EINVAL;
pci_warn(dev, "The correctable error(s) is masked by device\n");
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
if (!aer_mask_override && einj->uncor_status &&
!(einj->uncor_status & ~uncor_mask)) {
ret = -EINVAL;
pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
goto out_put;
}
@@ -445,7 +445,7 @@ static int aer_inject(struct aer_error_inj *einj)
rperr->source_id &= 0x0000ffff;
rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
}
- spin_unlock_irqrestore(&inject_lock, flags);
+ raw_spin_unlock_irqrestore(&inject_lock, flags);
if (aer_mask_override) {
pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
--
2.43.0
On Sun, Nov 02, 2025 at 10:57:06AM +0000, Guangbo Cui wrote:
> The AER injection path may run in interrupt-disabled context while
> holding inject_lock. On PREEMPT_RT kernels, spinlock_t becomes a
> sleeping lock, so it must not be taken while a raw_spinlock_t is held.
> Doing so violates lock ordering rules and triggers lockdep reports
> such as “Invalid wait context”.
>
> Convert inject_lock to raw_spinlock_t to ensure non-sleeping locking
> semantics. The protected list is bounded and used only for debugging,
> so raw locking will not cause latency issues.
Bounded how?
Also,
---
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -85,12 +85,13 @@ static void aer_error_init(struct aer_er
err->pos_cap_err = pos_cap_err;
}
-/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
unsigned int devfn)
{
struct aer_error *err;
+ lockdep_assert_held(&inject_lock);
+
list_for_each_entry(err, &einjected, list) {
if (domain == err->domain &&
bus == err->bus &&
@@ -100,20 +101,24 @@ static struct aer_error *__find_aer_erro
return NULL;
}
-/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
{
- int domain = pci_domain_nr(dev->bus);
+ int domain;
+
+ lockdep_assert_held(&inject_lock);
+
+ domain = pci_domain_nr(dev->bus);
if (domain < 0)
return NULL;
return __find_aer_error(domain, dev->bus->number, dev->devfn);
}
-/* inject_lock must be held before calling */
static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
{
struct pci_bus_ops *bus_ops;
+ lockdep_assert_held(&inject_lock);
+
list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
if (bus_ops->bus == bus)
return bus_ops->ops;
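For context, lockdep_assert_held() turns the old comment into a runtime
check: with lockdep enabled it splats when a caller reaches the function
without holding the lock. A minimal sketch of the pattern, using
hypothetical demo_* names:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head list;
};

static DEFINE_RAW_SPINLOCK(demo_lock);
static LIST_HEAD(demo_items);

/* Caller must hold demo_lock; lockdep now verifies this at runtime. */
static struct demo_item *__demo_first(void)
{
	lockdep_assert_held(&demo_lock);

	return list_first_entry_or_null(&demo_items, struct demo_item, list);
}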
On 2025-11-03 20:21:20 [+0100], Peter Zijlstra wrote:
> On Sun, Nov 02, 2025 at 10:57:06AM +0000, Guangbo Cui wrote:
> > The AER injection path may run in interrupt-disabled context while
> > holding inject_lock. On PREEMPT_RT kernels, spinlock_t becomes a
> > sleeping lock, so it must not be taken while a raw_spinlock_t is held.
> > Doing so violates lock ordering rules and triggers lockdep reports
> > such as “Invalid wait context”.
> >
> > Convert inject_lock to raw_spinlock_t to ensure non-sleeping locking
> > semantics. The protected list is bounded and used only for debugging,
> > so raw locking will not cause latency issues.
>
> Bounded how?

I think I used the term "not bounded" and said "it does not matter
because it is debugging only". Best would be to leave that part and use
only "only for debugging".

> Also,

Sebastian
On Mon, Nov 03, 2025 at 09:02:53PM +0100, Sebastian Andrzej Siewior wrote:
> On 2025-11-03 20:21:20 [+0100], Peter Zijlstra wrote:
> > On Sun, Nov 02, 2025 at 10:57:06AM +0000, Guangbo Cui wrote:
> > > The AER injection path may run in interrupt-disabled context while
> > > holding inject_lock. On PREEMPT_RT kernels, spinlock_t becomes a
> > > sleeping lock, so it must not be taken while a raw_spinlock_t is held.
> > > Doing so violates lock ordering rules and triggers lockdep reports
> > > such as “Invalid wait context”.
> > >
> > > Convert inject_lock to raw_spinlock_t to ensure non-sleeping locking
> > > semantics. The protected list is bounded and used only for debugging,
> > > so raw locking will not cause latency issues.
> >
> > Bounded how?
>
> I think I used the term "not bounded" and said "it does not matter
> because it is debugging only". Best would be to leave that part and use
> only "only for debugging".

Yes, that is clearer. Thanks!
On Mon, Nov 03, 2025 at 08:21:20PM +0100, Peter Zijlstra wrote:
> On Sun, Nov 02, 2025 at 10:57:06AM +0000, Guangbo Cui wrote:
> > The AER injection path may run in interrupt-disabled context while
> > holding inject_lock. On PREEMPT_RT kernels, spinlock_t becomes a
> > sleeping lock, so it must not be taken while a raw_spinlock_t is held.
> > Doing so violates lock ordering rules and triggers lockdep reports
> > such as “Invalid wait context”.
> >
> > Convert inject_lock to raw_spinlock_t to ensure non-sleeping locking
> > semantics. The protected list is bounded and used only for debugging,
> > so raw locking will not cause latency issues.
>
> Bounded how?
>
And
---
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -128,15 +128,14 @@ static struct pci_ops *__find_pci_bus_op
static struct pci_bus_ops *pci_bus_ops_pop(void)
{
- unsigned long flags;
struct pci_bus_ops *bus_ops;
- raw_spin_lock_irqsave(&inject_lock, flags);
+ guard(raw_spinlock_irqsave)(&inject_lock);
bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
struct pci_bus_ops, list);
if (bus_ops)
list_del(&bus_ops->list);
- raw_spin_unlock_irqrestore(&inject_lock, flags);
+
return bus_ops;
}
@@ -222,18 +221,18 @@ static int aer_inj_write(struct pci_bus
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- u32 *sim;
struct aer_error *err;
- unsigned long flags;
int domain;
- int rv;
+ u32 *sim;
- raw_spin_lock_irqsave(&inject_lock, flags);
+ guard(raw_spinlock_irqsave)(&inject_lock);
if (size != sizeof(u32))
goto out;
+
domain = pci_domain_nr(bus);
if (domain < 0)
goto out;
+
err = __find_aer_error(domain, bus->number, devfn);
if (!err)
goto out;
@@ -241,31 +240,29 @@ static int aer_inj_read_config(struct pc
sim = find_pci_config_dword(err, where, NULL);
if (sim) {
*val = *sim;
- raw_spin_unlock_irqrestore(&inject_lock, flags);
return 0;
}
+
out:
- rv = aer_inj_read(bus, devfn, where, size, val);
- raw_spin_unlock_irqrestore(&inject_lock, flags);
- return rv;
+ return aer_inj_read(bus, devfn, where, size, val);
}
static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- u32 *sim;
struct aer_error *err;
- unsigned long flags;
- int rw1cs;
int domain;
- int rv;
+ int rw1cs;
+ u32 *sim;
- raw_spin_lock_irqsave(&inject_lock, flags);
+ guard(raw_spinlock_irqsave)(&inject_lock);
if (size != sizeof(u32))
goto out;
+
domain = pci_domain_nr(bus);
if (domain < 0)
goto out;
+
err = __find_aer_error(domain, bus->number, devfn);
if (!err)
goto out;
@@ -276,13 +273,10 @@ static int aer_inj_write_config(struct p
*sim ^= val;
else
*sim = val;
- raw_spin_unlock_irqrestore(&inject_lock, flags);
return 0;
}
out:
- rv = aer_inj_write(bus, devfn, where, size, val);
- raw_spin_unlock_irqrestore(&inject_lock, flags);
- return rv;
+ return aer_inj_write(bus, devfn, where, size, val);
}
static struct pci_ops aer_inj_pci_ops = {
@@ -301,22 +295,21 @@ static void pci_bus_ops_init(struct pci_
static int pci_bus_set_aer_ops(struct pci_bus *bus)
{
- struct pci_ops *ops;
struct pci_bus_ops *bus_ops;
- unsigned long flags;
+ struct pci_ops *ops;
bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
if (!bus_ops)
return -ENOMEM;
ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
- raw_spin_lock_irqsave(&inject_lock, flags);
- if (ops == &aer_inj_pci_ops)
- goto out;
- pci_bus_ops_init(bus_ops, bus, ops);
- list_add(&bus_ops->list, &pci_bus_ops_list);
- bus_ops = NULL;
-out:
- raw_spin_unlock_irqrestore(&inject_lock, flags);
+
+ scoped_guard (raw_spinlock_irqsave, &inject_lock) {
+ if (ops == &aer_inj_pci_ops)
+ break;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+ }
kfree(bus_ops);
return 0;
}
@@ -388,69 +381,66 @@ static int aer_inject(struct aer_error_i
uncor_mask);
}
- raw_spin_lock_irqsave(&inject_lock, flags);
-
- err = __find_aer_error_by_dev(dev);
- if (!err) {
- err = err_alloc;
- err_alloc = NULL;
- aer_error_init(err, einj->domain, einj->bus, devfn,
- pos_cap_err);
- list_add(&err->list, &einjected);
- }
- err->uncor_status |= einj->uncor_status;
- err->cor_status |= einj->cor_status;
- err->header_log0 = einj->header_log0;
- err->header_log1 = einj->header_log1;
- err->header_log2 = einj->header_log2;
- err->header_log3 = einj->header_log3;
-
- if (!aer_mask_override && einj->cor_status &&
- !(einj->cor_status & ~cor_mask)) {
- ret = -EINVAL;
- pci_warn(dev, "The correctable error(s) is masked by device\n");
- raw_spin_unlock_irqrestore(&inject_lock, flags);
- goto out_put;
- }
- if (!aer_mask_override && einj->uncor_status &&
- !(einj->uncor_status & ~uncor_mask)) {
- ret = -EINVAL;
- pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
- raw_spin_unlock_irqrestore(&inject_lock, flags);
- goto out_put;
- }
-
- rperr = __find_aer_error_by_dev(rpdev);
- if (!rperr) {
- rperr = rperr_alloc;
- rperr_alloc = NULL;
- aer_error_init(rperr, pci_domain_nr(rpdev->bus),
- rpdev->bus->number, rpdev->devfn,
- rp_pos_cap_err);
- list_add(&rperr->list, &einjected);
- }
- if (einj->cor_status) {
- if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
- rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
- else
- rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
- rperr->source_id &= 0xffff0000;
- rperr->source_id |= PCI_DEVID(einj->bus, devfn);
- }
- if (einj->uncor_status) {
- if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
- rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
- if (sever & einj->uncor_status) {
- rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
- if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
- rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
- } else
- rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
- rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
- rperr->source_id &= 0x0000ffff;
- rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
+ scoped_guard (raw_spinlock_irqsave, &inject_lock) {
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->domain, einj->bus, devfn,
+ pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ if (!aer_mask_override && einj->cor_status &&
+ !(einj->cor_status & ~cor_mask)) {
+ ret = -EINVAL;
+ pci_warn(dev, "The correctable error(s) is masked by device\n");
+ goto out_put;
+ }
+ if (!aer_mask_override && einj->uncor_status &&
+ !(einj->uncor_status & ~uncor_mask)) {
+ ret = -EINVAL;
+ pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
+ goto out_put;
+ }
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, pci_domain_nr(rpdev->bus),
+ rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn);
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
+ }
}
- raw_spin_unlock_irqrestore(&inject_lock, flags);
if (aer_mask_override) {
pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
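For reference, guard()/scoped_guard() come from <linux/cleanup.h>: the
lock is dropped automatically when the guard goes out of scope, which is
what lets the conversion above delete every unlock-before-return. A
minimal sketch, again with hypothetical demo_* names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_state;

static int demo_read(int *val)
{
	/* Unlocked automatically on every return path below. */
	guard(raw_spinlock_irqsave)(&demo_lock);

	if (!val)
		return -EINVAL;

	*val = demo_state;
	return 0;
}

static void demo_write(int val)
{
	/* Scoped form: the lock is held only for the braced block,
	 * and break leaves the scope early while still unlocking. */
	scoped_guard (raw_spinlock_irqsave, &demo_lock) {
		if (val < 0)
			break;
		demo_state = val;
	}
}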
On Mon, Nov 03, 2025 at 08:27:09PM +0100, Peter Zijlstra wrote:
> On Mon, Nov 03, 2025 at 08:21:20PM +0100, Peter Zijlstra wrote:
> > On Sun, Nov 02, 2025 at 10:57:06AM +0000, Guangbo Cui wrote:
> > > The AER injection path may run in interrupt-disabled context while
> > > holding inject_lock. On PREEMPT_RT kernels, spinlock_t becomes a
> > > sleeping lock, so it must not be taken while a raw_spinlock_t is held.
> > > Doing so violates lock ordering rules and triggers lockdep reports
> > > such as “Invalid wait context”.
> > >
> > > Convert inject_lock to raw_spinlock_t to ensure non-sleeping locking
> > > semantics. The protected list is bounded and used only for debugging,
> > > so raw locking will not cause latency issues.
> >
> > Bounded how?
> >
> + scoped_guard (raw_spinlock_irqsave, &inject_lock) {
> + if (ops == &aer_inj_pci_ops)
> + break;
> + pci_bus_ops_init(bus_ops, bus, ops);
> + list_add(&bus_ops->list, &pci_bus_ops_list);
> + bus_ops = NULL;
> + }
I found that there are two styles of calling scoped_guard in the kernel:
1. scoped_guard (...)
2. scoped_guard(...)
Is there any coding convention or guideline regarding this?
> + rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
> + rperr->source_id &= 0xffff0000;
> + rperr->source_id |= PCI_DEVID(einj->bus, devfn);
> + }
> + if (einj->uncor_status) {
> + if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
> + rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
> + if (sever & einj->uncor_status) {
> + rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
> + if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
> + rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
> + } else
> + rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
> + rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
> + rperr->source_id &= 0x0000ffff;
> + rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
> + }
> }
> - raw_spin_unlock_irqrestore(&inject_lock, flags);
>
> if (aer_mask_override) {
> pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
LGTM. If there are no objections, I’ll include these two patches in the
next version of the patchset and add your Signed-off-by tag.
Best regards,
Guangbo
On 2025-11-06 13:08:22 [+0000], Guangbo Cui wrote:
>
> LGTM. If there are no objections, I’ll include these two patches in the
> next version of the patchset and add your Signed-off-by tag.

I would suggest a Suggested-by or a Co-developed-by. Or, if you take his
work as-is, make him the author and simply provide a patch description
for his "patch".

> Best regards,
> Guangbo

Sebastian
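For reference, kernel convention (Documentation/process/submitting-patches.rst)
is that Co-developed-by is immediately followed by the co-author's
Signed-off-by, e.g.:

Co-developed-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Guangbo Cui <jckeep.cuiguangbo@gmail.com>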
On Thu, Nov 06, 2025 at 01:08:22PM +0000, Guangbo Cui wrote:
> > + scoped_guard (raw_spinlock_irqsave, &inject_lock) {
> > + if (ops == &aer_inj_pci_ops)
> > + break;
> > + pci_bus_ops_init(bus_ops, bus, ops);
> > + list_add(&bus_ops->list, &pci_bus_ops_list);
> > + bus_ops = NULL;
> > + }
>
> I found that there are two styles of calling scoped_guard in the kernel:
>
> 1. scoped_guard (...)
>
> 2. scoped_guard(...)
>
> Is there any coding convention or guideline regarding this?
Not really :/ I usually use the former, to mirror if (cond) and for
(;;) usage as opposed to func(args).
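Both forms preprocess identically; for illustration, side by side (the
body and lock name here are placeholders):

/* control-flow style, mirroring if (cond) and for (;;): */
scoped_guard (raw_spinlock_irqsave, &demo_lock)
	demo_count++;

/* function-call style: */
scoped_guard(raw_spinlock_irqsave, &demo_lock)
	demo_count++;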
> > + rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
> > + rperr->source_id &= 0xffff0000;
> > + rperr->source_id |= PCI_DEVID(einj->bus, devfn);
> > + }
> > + if (einj->uncor_status) {
> > + if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
> > + rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
> > + if (sever & einj->uncor_status) {
> > + rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
> > + if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
> > + rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
> > + } else
> > + rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
> > + rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
> > + rperr->source_id &= 0x0000ffff;
> > + rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
> > + }
> > }
> > - raw_spin_unlock_irqrestore(&inject_lock, flags);
> >
> > if (aer_mask_override) {
> > pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
>
> LGTM. If there are no objections, I’ll include these two patches in the
> next version of the patchset and add your Signed-off-by tag.
Sure, but please do test them, I've not even had them near a compiler
;-)
Thanks!