This patch is based on Linux kernel 6.16.0.
Introduce a lockless mechanism for tracking pending vCPU interrupts using
atomic bit operations. The design follows a multi-producer, single-consumer
model where the consumer is the vCPU itself.
Two bitmaps are added:
- irqs_pending — represents interrupts currently pending
- irqs_pending_mask — represents bits that have changed in irqs_pending
Introduce vcpu_(un)set_interrupt() to mark an interrupt in the irqs_pending{_mask}
bitmaps, i.e. to record that the vCPU does or does not have the given interrupt
pending.
The remaining parts of the lockless mechanism for tracking pending vCPU
interrupts (such as vcpu_has_interrupts(), vcpu_flush_interrupts() and
vcpu_sync_interrupts()) are going to be introduced in a separate patch.
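For context, the consumer side (to be introduced separately) is expected to
follow the Linux model and consume both bitmaps roughly as sketched below.
This is only an illustrative sketch: the final names and the field holding
the guest's HVIP value are not part of this patch.

  /* Runs on the vCPU itself, hence the single consumer. */
  void vcpu_flush_interrupts(struct vcpu *v)
  {
      unsigned long mask, val;

      /* Only the low bitmap word is shown for brevity. */
      if ( ACCESS_ONCE(v->arch.irqs_pending_mask[0]) )
      {
          /* Atomically claim the bits that changed since the last flush. */
          mask = xchg(&v->arch.irqs_pending_mask[0], 0);
          val = ACCESS_ONCE(v->arch.irqs_pending[0]) & mask;

          /* Fold only the changed bits into the guest's HVIP image. */
          v->arch.hvip &= ~mask;
          v->arch.hvip |= val;
      }
  }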
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
xen/arch/riscv/domain.c | 47 +++++++++++++++++++++
xen/arch/riscv/include/asm/domain.h | 19 +++++++++
xen/arch/riscv/include/asm/riscv_encoding.h | 1 +
3 files changed, 67 insertions(+)
diff --git a/xen/arch/riscv/domain.c b/xen/arch/riscv/domain.c
index 164ab14a5209..8a010ae5b47e 100644
--- a/xen/arch/riscv/domain.c
+++ b/xen/arch/riscv/domain.c
@@ -5,9 +5,11 @@
#include <xen/sched.h>
#include <xen/smp.h>
+#include <asm/bitops.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/riscv_encoding.h>
+#include <asm/system.h>
#include <asm/vtimer.h>
static void vcpu_csr_init(struct vcpu *v)
@@ -100,6 +102,9 @@ int arch_vcpu_create(struct vcpu *v)
if ( is_idle_vcpu(v) )
return rc;
+ bitmap_zero(v->arch.irqs_pending, RISCV_VCPU_NR_IRQS);
+ bitmap_zero(v->arch.irqs_pending_mask, RISCV_VCPU_NR_IRQS);
+
if ( (rc = vcpu_vtimer_init(v)) )
goto fail;
@@ -135,3 +140,45 @@ void vcpu_kick(struct vcpu *v)
smp_send_event_check_mask(cpumask_of(v->processor));
}
}
+
+int vcpu_set_interrupt(struct vcpu *v, const unsigned int irq)
+{
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by the RISC-V privileged specification.
+ */
+ if ( irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
+ irq != IRQ_VS_TIMER &&
+ irq != IRQ_VS_EXT )
+ return -EINVAL;
+
+ set_bit(irq, v->arch.irqs_pending);
+ smp_mb__before_atomic();
+ set_bit(irq, v->arch.irqs_pending_mask);
+
+ vcpu_kick(v);
+
+ return 0;
+}
+
+int vcpu_unset_interrupt(struct vcpu *v, const unsigned int irq)
+{
+ /*
+ * We only allow VS-mode software, timer, and external
+ * interrupts when irq is one of the local interrupts
+ * defined by the RISC-V privileged specification.
+ */
+ if ( irq < IRQ_LOCAL_MAX &&
+ irq != IRQ_VS_SOFT &&
+ irq != IRQ_VS_TIMER &&
+ irq != IRQ_VS_EXT )
+ return -EINVAL;
+
+ clear_bit(irq, v->arch.irqs_pending);
+ smp_mb__before_atomic();
+ set_bit(irq, v->arch.irqs_pending_mask);
+
+ return 0;
+}
diff --git a/xen/arch/riscv/include/asm/domain.h b/xen/arch/riscv/include/asm/domain.h
index be7ddaff30e7..a7538e0dc966 100644
--- a/xen/arch/riscv/include/asm/domain.h
+++ b/xen/arch/riscv/include/asm/domain.h
@@ -85,6 +85,22 @@ struct arch_vcpu
register_t vstval;
register_t vsatp;
register_t vsepc;
+
+ /*
+ * VCPU interrupts
+ *
+ * We have a lockless approach for tracking pending VCPU interrupts
+ * implemented using atomic bitops. The irqs_pending bitmap represents
+ * pending interrupts, whereas irqs_pending_mask represents bits changed
+ * in irqs_pending. Our approach is modeled on the multiple-producer,
+ * single-consumer problem, where the consumer is the VCPU itself.
+ *
+ * DECLARE_BITMAP() is needed here to support 64 vCPU local interrupts
+ * on RV32 host.
+ */
+#define RISCV_VCPU_NR_IRQS 64
+ DECLARE_BITMAP(irqs_pending, RISCV_VCPU_NR_IRQS);
+ DECLARE_BITMAP(irqs_pending_mask, RISCV_VCPU_NR_IRQS);
} __cacheline_aligned;
struct paging_domain {
@@ -123,6 +139,9 @@ static inline void update_guest_memory_policy(struct vcpu *v,
static inline void arch_vcpu_block(struct vcpu *v) {}
+int vcpu_set_interrupt(struct vcpu *v, const unsigned int irq);
+int vcpu_unset_interrupt(struct vcpu *v, const unsigned int irq);
+
#endif /* ASM__RISCV__DOMAIN_H */
/*
diff --git a/xen/arch/riscv/include/asm/riscv_encoding.h b/xen/arch/riscv/include/asm/riscv_encoding.h
index dd15731a86fa..32d25f2d3e94 100644
--- a/xen/arch/riscv/include/asm/riscv_encoding.h
+++ b/xen/arch/riscv/include/asm/riscv_encoding.h
@@ -91,6 +91,7 @@
#define IRQ_M_EXT 11
#define IRQ_S_GEXT 12
#define IRQ_PMU_OVF 13
+#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
#define MIP_SSIP (_UL(1) << IRQ_S_SOFT)
#define MIP_VSSIP (_UL(1) << IRQ_VS_SOFT)
--
2.52.0
On 24.12.2025 18:03, Oleksii Kurochko wrote:
> This patch is based on Linux kernel 6.16.0.
>
> Introduce a lockless mechanism for tracking pending vCPU interrupts using
> atomic bit operations. The design follows a multi-producer, single-consumer
> model where the consumer is the vCPU itself.
>
> Two bitmaps are added:
> - irqs_pending — represents interrupts currently pending
> - irqs_pending_mask — represents bits that have changed in irqs_pending
>
> Introduce vcpu_(un)set_interrupt() to mark an interrupt in the irqs_pending{_mask}
> bitmaps, i.e. to record that the vCPU does or does not have the given interrupt
> pending.
It's not becoming clear how these are going to be used. It's also not clear
to me whether you really need to record these in software: Aren't there
(virtual) registers where they would be more naturally tracked, much like
hardware would do?
Furthermore, since you're dealing with two bitmaps, there's no full
atomicity here anyway. The bitmaps are each dealt with atomically, but
the overall update isn't atomic. Whether that's going to be okay can only
be told when also seeing the producer side.
> --- a/xen/arch/riscv/domain.c
> +++ b/xen/arch/riscv/domain.c
> @@ -5,9 +5,11 @@
> #include <xen/sched.h>
> #include <xen/smp.h>
>
> +#include <asm/bitops.h>
> #include <asm/cpufeature.h>
> #include <asm/csr.h>
> #include <asm/riscv_encoding.h>
> +#include <asm/system.h>
> #include <asm/vtimer.h>
>
> static void vcpu_csr_init(struct vcpu *v)
> @@ -100,6 +102,9 @@ int arch_vcpu_create(struct vcpu *v)
> if ( is_idle_vcpu(v) )
> return rc;
>
> + bitmap_zero(v->arch.irqs_pending, RISCV_VCPU_NR_IRQS);
> + bitmap_zero(v->arch.irqs_pending_mask, RISCV_VCPU_NR_IRQS);
This is pointless, as struct vcpu starts out all zero.
> @@ -135,3 +140,45 @@ void vcpu_kick(struct vcpu *v)
> smp_send_event_check_mask(cpumask_of(v->processor));
> }
> }
> +
> +int vcpu_set_interrupt(struct vcpu *v, const unsigned int irq)
> +{
> + /*
> + * We only allow VS-mode software, timer, and external
> + * interrupts when irq is one of the local interrupts
> + * defined by the RISC-V privileged specification.
> + */
> + if ( irq < IRQ_LOCAL_MAX &&
What use is this? In particular this allows an incoming irq with a huge
number to ...
> + irq != IRQ_VS_SOFT &&
> + irq != IRQ_VS_TIMER &&
> + irq != IRQ_VS_EXT )
> + return -EINVAL;
> +
> + set_bit(irq, v->arch.irqs_pending);
> + smp_mb__before_atomic();
> + set_bit(irq, v->arch.irqs_pending_mask);
... overrun both bitmaps.
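Presumably the check also wants to reject anything that doesn't fit the
bitmaps, along the lines of (sketch only):

  if ( irq >= RISCV_VCPU_NR_IRQS ||
       (irq < IRQ_LOCAL_MAX &&
        irq != IRQ_VS_SOFT &&
        irq != IRQ_VS_TIMER &&
        irq != IRQ_VS_EXT) )
      return -EINVAL;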
> --- a/xen/arch/riscv/include/asm/domain.h
> +++ b/xen/arch/riscv/include/asm/domain.h
> @@ -85,6 +85,22 @@ struct arch_vcpu
> register_t vstval;
> register_t vsatp;
> register_t vsepc;
> +
> + /*
> + * VCPU interrupts
> + *
> + * We have a lockless approach for tracking pending VCPU interrupts
> + * implemented using atomic bitops. The irqs_pending bitmap represents
> + * pending interrupts, whereas irqs_pending_mask represents bits changed
> + * in irqs_pending.
And hence a set immediately followed by an unset is then indistinguishable
from just an unset (or the other way around). This may not be a problem, but
if it isn't, I think this needs explaining. Much like it is unclear why the
"changed" state needs tracking in the first place.
> Our approach is modeled on the multiple-producer,
> + * single-consumer problem, where the consumer is the VCPU itself.
> + *
> + * DECLARE_BITMAP() is needed here to support 64 vCPU local interrupts
> + * on RV32 host.
> + */
> +#define RISCV_VCPU_NR_IRQS 64
> + DECLARE_BITMAP(irqs_pending, RISCV_VCPU_NR_IRQS);
> + DECLARE_BITMAP(irqs_pending_mask, RISCV_VCPU_NR_IRQS);
> } __cacheline_aligned;
>
> struct paging_domain {
> @@ -123,6 +139,9 @@ static inline void update_guest_memory_policy(struct vcpu *v,
>
> static inline void arch_vcpu_block(struct vcpu *v) {}
>
> +int vcpu_set_interrupt(struct vcpu *v, const unsigned int irq);
> +int vcpu_unset_interrupt(struct vcpu *v, const unsigned int irq);
Why the const-s?
> --- a/xen/arch/riscv/include/asm/riscv_encoding.h
> +++ b/xen/arch/riscv/include/asm/riscv_encoding.h
> @@ -91,6 +91,7 @@
> #define IRQ_M_EXT 11
> #define IRQ_S_GEXT 12
> #define IRQ_PMU_OVF 13
> +#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
MAX together with "+ 1" looks wrong. What is 14 (which, when MAX is 14,
must be a valid interrupt)? Or if 14 isn't a valid interrupt, please use
NR or NUM.
Also, nit: Padding doesn't match with the earlier #define-s (even if in the
quoted text it appears otherwise).
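I.e. presumably something like

  #define IRQ_LOCAL_NR  (IRQ_PMU_OVF + 1)

(or IRQ_LOCAL_NUM), with the padding matching the neighbouring #define-s.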
Jan