Using dynamically allocated / maintained vectors has several downsides:
- possible nesting of IRQs due to the effects of IRQ migration,
- reduction of vectors available for devices,
- IRQs not moving as intended if there's shortage of vectors,
- higher runtime overhead.
As the vector also doesn't need to be of any priority (first and foremost
it really shouldn't be of higher or same priority as the timer IRQ, as
that raises TIMER_SOFTIRQ anyway), avoid any "ordinary" vectors altogether
and use a vector from the 0x10...0x1f exception vector space. Exception vs
interrupt can easily be distinguished by checking for the presence of an
error code.
With a fixed vector, less updating is now necessary in
set_channel_irq_affinity(); in particular channels don't need transient
masking anymore, as the necessary update is now atomic. To fully leverage
this, however, we want to stop using hpet_msi_set_affinity() there. With
the transient masking dropped, we're no longer at risk of missing events.
In principle a change to setup_vector_irq() would be necessary, but only
if we used low-prio vectors as direct-APIC ones. Since the change would be
at best benign here, it is being omitted.
Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Release-Acked-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
This is an alternative proposal to
https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
Should we keep hpet_msi_set_affinity() at all? We'd better not have the
generic IRQ subsystem play with our IRQs' affinities ... (If so, this
likely would want to be a separate patch, though.)
The hpet_enable_channel() call could in principle be made (effectively)
conditional, at the price of introducing a check in hpet_enable_channel().
However, as much as eliminating the masking didn't help with the many
excess (early) IRQs I'm observing on Intel hardware, doing so doesn't help
either.
The Fixes: tag indicates where the problem got significantly worse; in
principle it was there already before (crashing at perhaps 6 or 7 levels
of nested IRQs).
---
v2: Re-work set_channel_irq_affinity() intensively. Re-base over the
dropping of another patch. Drop setup_vector_irq() change.
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -9,17 +9,19 @@
#include <xen/timer.h>
#include <xen/smp.h>
#include <xen/softirq.h>
+#include <xen/cpuidle.h>
#include <xen/irq.h>
#include <xen/numa.h>
#include <xen/param.h>
#include <xen/sched.h>
#include <asm/apic.h>
-#include <asm/fixmap.h>
#include <asm/div64.h>
+#include <asm/fixmap.h>
+#include <asm/genapic.h>
#include <asm/hpet.h>
+#include <asm/irq-vectors.h>
#include <asm/msi.h>
-#include <xen/cpuidle.h>
#define MAX_DELTA_NS MILLISECS(10*1000)
#define MIN_DELTA_NS MICROSECS(20)
@@ -251,10 +253,9 @@ static void cf_check hpet_interrupt_hand
ch->event_handler(ch);
}
-static void cf_check hpet_msi_unmask(struct irq_desc *desc)
+static void hpet_enable_channel(struct hpet_event_channel *ch)
{
u32 cfg;
- struct hpet_event_channel *ch = desc->action->dev_id;
cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
cfg |= HPET_TN_ENABLE;
@@ -262,6 +263,11 @@ static void cf_check hpet_msi_unmask(str
ch->msi.msi_attrib.host_masked = 0;
}
+static void cf_check hpet_msi_unmask(struct irq_desc *desc)
+{
+ hpet_enable_channel(desc->action->dev_id);
+}
+
static void hpet_disable_channel(struct hpet_event_channel *ch)
{
u32 cfg;
@@ -307,15 +313,13 @@ static void cf_check hpet_msi_set_affini
struct hpet_event_channel *ch = desc->action->dev_id;
struct msi_msg msg = ch->msi.msg;
- msg.dest32 = set_desc_affinity(desc, mask);
- if ( msg.dest32 == BAD_APICID )
- return;
+ /* This really is only for dump_irqs(). */
+ cpumask_copy(desc->arch.cpu_mask, mask);
- msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(desc->arch.vector);
+ msg.dest32 = cpu_mask_to_apicid(mask);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
- if ( msg.data != ch->msi.msg.data || msg.dest32 != ch->msi.msg.dest32 )
+ if ( msg.dest32 != ch->msi.msg.dest32 )
hpet_msi_write(ch, &msg);
}
@@ -328,7 +332,7 @@ static hw_irq_controller hpet_msi_type =
.shutdown = hpet_msi_shutdown,
.enable = hpet_msi_unmask,
.disable = hpet_msi_mask,
- .ack = ack_nonmaskable_msi_irq,
+ .ack = irq_actor_none,
.end = end_nonmaskable_irq,
.set_affinity = hpet_msi_set_affinity,
};
@@ -347,6 +351,12 @@ static int __init hpet_setup_msi_irq(str
u32 cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
irq_desc_t *desc = irq_to_desc(ch->msi.irq);
+ clear_irq_vector(ch->msi.irq);
+ ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, &cpu_online_map);
+ if ( ret )
+ return ret;
+ cpumask_setall(desc->affinity);
+
if ( iommu_intremap != iommu_intremap_off )
{
ch->msi.hpet_id = hpet_blockid;
@@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
static void set_channel_irq_affinity(struct hpet_event_channel *ch)
{
struct irq_desc *desc = irq_to_desc(ch->msi.irq);
+ struct msi_msg msg = ch->msi.msg;
ASSERT(!local_irq_is_enabled());
spin_lock(&desc->lock);
- hpet_msi_mask(desc);
- hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
- hpet_msi_unmask(desc);
+
+ per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
+
+ /*
+ * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
+ * actual update below (either of the IRTE or of [just] message address;
+ * with interrupt remapping message address/data don't change) now being
+ * atomic, we can avoid masking the IRQ around the update. As a result
+ * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
+ * keeps setting the new deadline only afterwards).
+ */
+ cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
+
spin_unlock(&desc->lock);
- spin_unlock(&ch->lock);
+ msg.dest32 = cpu_physical_id(ch->cpu);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
+ if ( msg.dest32 != ch->msi.msg.dest32 )
+ {
+ ch->msi.msg = msg;
+
+ if ( iommu_intremap != iommu_intremap_off )
+ {
+ int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
- /* We may have missed an interrupt due to the temporary masking. */
- if ( ch->event_handler && ch->next_event < NOW() )
- ch->event_handler(ch);
+ ASSERT(rc <= 0);
+ if ( rc > 0 )
+ {
+ ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
+ ASSERT(msg.address_lo ==
+ hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
+ }
+ }
+ else
+ hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
+ }
+
+ hpet_enable_channel(ch);
+ spin_unlock(&ch->lock);
}
static void hpet_attach_channel(unsigned int cpu,
--- a/xen/arch/x86/include/asm/irq-vectors.h
+++ b/xen/arch/x86/include/asm/irq-vectors.h
@@ -18,6 +18,15 @@
/* IRQ0 (timer) is statically allocated but must be high priority. */
#define IRQ0_VECTOR 0xf0
+/*
+ * Low-priority (for now statically allocated) vectors, sharing entry
+ * points with exceptions in the 0x10 ... 0x1f range, as long as the
+ * respective exception has an error code.
+ */
+#define FIRST_LOPRIORITY_VECTOR 0x10
+#define HPET_BROADCAST_VECTOR X86_EXC_AC
+#define LAST_LOPRIORITY_VECTOR 0x1f
+
/* Legacy PIC uses vectors 0x20-0x2f. */
#define FIRST_LEGACY_VECTOR FIRST_DYNAMIC_VECTOR
#define LAST_LEGACY_VECTOR (FIRST_LEGACY_VECTOR + 0xf)
@@ -40,7 +49,7 @@
/* There's no IRQ2 at the PIC. */
#define IRQ_MOVE_CLEANUP_VECTOR (FIRST_LEGACY_VECTOR + 2)
-#define FIRST_IRQ_VECTOR FIRST_DYNAMIC_VECTOR
+#define FIRST_IRQ_VECTOR FIRST_LOPRIORITY_VECTOR
#define LAST_IRQ_VECTOR LAST_HIPRIORITY_VECTOR
#endif /* _ASM_IRQ_VECTORS_H */
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -158,7 +158,7 @@ void msi_compose_msg(unsigned vector, co
{
memset(msg, 0, sizeof(*msg));
- if ( vector < FIRST_DYNAMIC_VECTOR )
+ if ( vector < FIRST_LOPRIORITY_VECTOR )
return;
if ( cpu_mask )
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -1045,7 +1045,13 @@ END(entry_GP)
FUNC(entry_AC)
ENDBR64
+ /* #AC shares its entry point with the HPET broadcast interrupt. */
+ test $8, %spl
+ jz .Lac
+ push $0
+.Lac:
movb $X86_EXC_AC, EFRAME_entry_vector(%rsp)
+ jnz common_interrupt
jmp handle_exception
END(entry_AC)
On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
> Using dynamically allocated / maintained vectors has several downsides:
> - possible nesting of IRQs due to the effects of IRQ migration,
> - reduction of vectors available for devices,
> - IRQs not moving as intended if there's shortage of vectors,
> - higher runtime overhead.
>
> As the vector also doesn't need to be of any priority (first and foremost
> it really shouldn't be of higher or same priority as the timer IRQ, as
> that raises TIMER_SOFTIRQ anyway), avoid any "ordinary" vectors altogther
> and use a vector from the 0x10...0x1f exception vector space. Exception vs
> interrupt can easily be distinguished by checking for the presence of an
> error code.
>
> With a fixed vector, less updating is now necessary in
> set_channel_irq_affinity(); in particular channels don't need transiently
> masking anymore, as the necessary update is now atomic. To fully leverage
> this, however, we want to stop using hpet_msi_set_affinity() there. With
> the transient masking dropped, we're no longer at risk of missing events.
>
> In principle a change to setup_vector_irq() would be necessary, but only
> if we used low-prio vectors as direct-APIC ones. Since the change would be
> at best benign here, it is being omitted.
>
> Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Release-Acked-by: Oleksii Kurochko<oleksii.kurochko@gmail.com>
> ---
> This is an alternative proposal to
> https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
>
> Should we keep hpet_msi_set_affinity() at all? We'd better not have the
> generic IRQ subsystem play with our IRQs' affinities ... (If so, this
> likely would want to be a separate patch, though.)
I think that needs to become a no-op, with possibly an ASSERT? Is it
possible for dom0 to try to balance this IRQ? I would think not.
> The hpet_enable_channel() call could in principle be made (effectively)
> conditional, at the price of introducing a check in hpet_enable_channel().
> However, as much as eliminating the masking didn't help with the many
> excess (early) IRQs I'm observing on Intel hardware, doing so doesn't help
> either.
Let's go for the current approach.
> The Fixes: tag indicates where the problem got signficantly worse; in
> principle it was there already before (crashing at perhaps 6 or 7 levels
> of nested IRQs).
> ---
> v2: Re-work set_channel_irq_affinity() intensively. Re-base over the
> dropping of another patch. Drop setup_vector_irq() change.
>
> --- a/xen/arch/x86/hpet.c
> +++ b/xen/arch/x86/hpet.c
> @@ -9,17 +9,19 @@
> #include <xen/timer.h>
> #include <xen/smp.h>
> #include <xen/softirq.h>
> +#include <xen/cpuidle.h>
> #include <xen/irq.h>
> #include <xen/numa.h>
> #include <xen/param.h>
> #include <xen/sched.h>
>
> #include <asm/apic.h>
> -#include <asm/fixmap.h>
> #include <asm/div64.h>
> +#include <asm/fixmap.h>
> +#include <asm/genapic.h>
> #include <asm/hpet.h>
> +#include <asm/irq-vectors.h>
> #include <asm/msi.h>
> -#include <xen/cpuidle.h>
>
> #define MAX_DELTA_NS MILLISECS(10*1000)
> #define MIN_DELTA_NS MICROSECS(20)
> @@ -251,10 +253,9 @@ static void cf_check hpet_interrupt_hand
> ch->event_handler(ch);
> }
>
> -static void cf_check hpet_msi_unmask(struct irq_desc *desc)
> +static void hpet_enable_channel(struct hpet_event_channel *ch)
> {
> u32 cfg;
> - struct hpet_event_channel *ch = desc->action->dev_id;
>
> cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
> cfg |= HPET_TN_ENABLE;
> @@ -262,6 +263,11 @@ static void cf_check hpet_msi_unmask(str
> ch->msi.msi_attrib.host_masked = 0;
> }
>
> +static void cf_check hpet_msi_unmask(struct irq_desc *desc)
> +{
> + hpet_enable_channel(desc->action->dev_id);
> +}
> +
> static void hpet_disable_channel(struct hpet_event_channel *ch)
> {
> u32 cfg;
> @@ -307,15 +313,13 @@ static void cf_check hpet_msi_set_affini
> struct hpet_event_channel *ch = desc->action->dev_id;
> struct msi_msg msg = ch->msi.msg;
>
> - msg.dest32 = set_desc_affinity(desc, mask);
> - if ( msg.dest32 == BAD_APICID )
> - return;
> + /* This really is only for dump_irqs(). */
> + cpumask_copy(desc->arch.cpu_mask, mask);
>
> - msg.data &= ~MSI_DATA_VECTOR_MASK;
> - msg.data |= MSI_DATA_VECTOR(desc->arch.vector);
> + msg.dest32 = cpu_mask_to_apicid(mask);
> msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
> msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
> - if ( msg.data != ch->msi.msg.data || msg.dest32 != ch->msi.msg.dest32 )
> + if ( msg.dest32 != ch->msi.msg.dest32 )
> hpet_msi_write(ch, &msg);
> }
>
> @@ -328,7 +332,7 @@ static hw_irq_controller hpet_msi_type =
> .shutdown = hpet_msi_shutdown,
> .enable = hpet_msi_unmask,
> .disable = hpet_msi_mask,
> - .ack = ack_nonmaskable_msi_irq,
> + .ack = irq_actor_none,
> .end = end_nonmaskable_irq,
> .set_affinity = hpet_msi_set_affinity,
> };
> @@ -347,6 +351,12 @@ static int __init hpet_setup_msi_irq(str
> u32 cfg = hpet_read32(HPET_Tn_CFG(ch->idx));
> irq_desc_t *desc = irq_to_desc(ch->msi.irq);
>
> + clear_irq_vector(ch->msi.irq);
> + ret = bind_irq_vector(ch->msi.irq, HPET_BROADCAST_VECTOR, &cpu_online_map);
> + if ( ret )
> + return ret;
> + cpumask_setall(desc->affinity);
> +
> if ( iommu_intremap != iommu_intremap_off )
> {
> ch->msi.hpet_id = hpet_blockid;
> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
> {
> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
> + struct msi_msg msg = ch->msi.msg;
>
> ASSERT(!local_irq_is_enabled());
> spin_lock(&desc->lock);
> - hpet_msi_mask(desc);
> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
> - hpet_msi_unmask(desc);
> +
> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
> +
> + /*
> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
> + * actual update below (either of the IRTE or of [just] message address;
> + * with interrupt remapping message address/data don't change) now being
> + * atomic, we can avoid masking the IRQ around the update. As a result
> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
> + * keeps setting the new deadline only afterwards).
> + */
> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
> +
> spin_unlock(&desc->lock);
>
> - spin_unlock(&ch->lock);
> + msg.dest32 = cpu_physical_id(ch->cpu);
> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
> + if ( msg.dest32 != ch->msi.msg.dest32 )
> + {
> + ch->msi.msg = msg;
> +
> + if ( iommu_intremap != iommu_intremap_off )
> + {
> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
>
> - /* We may have missed an interrupt due to the temporary masking. */
> - if ( ch->event_handler && ch->next_event < NOW() )
> - ch->event_handler(ch);
> + ASSERT(rc <= 0);
> + if ( rc > 0 )
> + {
> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> + ASSERT(msg.address_lo ==
> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> + }
The sequence of asserts seem wrong here, the asserts inside of the rc
> 0 check will never trigger, because there's an ASSERT(rc <= 0)
ahead of them?
> + }
> + else
> + hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
If you avoid the HPET register update here you possibly need to make
sure that both fields are unconditionally written on the first call
after resume from suspension. hpet_resume() needs to somehow taint
the channels to signal that a re-write of the address and data fields
is mandatory regardless of what iommu_update_ire_from_msi() has
returned.
Thanks, Roger.
On 20.10.2025 18:22, Roger Pau Monné wrote:
> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
>> Using dynamically allocated / maintained vectors has several downsides:
>> - possible nesting of IRQs due to the effects of IRQ migration,
>> - reduction of vectors available for devices,
>> - IRQs not moving as intended if there's shortage of vectors,
>> - higher runtime overhead.
>>
>> As the vector also doesn't need to be of any priority (first and foremost
>> it really shouldn't be of higher or same priority as the timer IRQ, as
>> that raises TIMER_SOFTIRQ anyway), avoid any "ordinary" vectors altogther
>> and use a vector from the 0x10...0x1f exception vector space. Exception vs
>> interrupt can easily be distinguished by checking for the presence of an
>> error code.
>>
>> With a fixed vector, less updating is now necessary in
>> set_channel_irq_affinity(); in particular channels don't need transiently
>> masking anymore, as the necessary update is now atomic. To fully leverage
>> this, however, we want to stop using hpet_msi_set_affinity() there. With
>> the transient masking dropped, we're no longer at risk of missing events.
>>
>> In principle a change to setup_vector_irq() would be necessary, but only
>> if we used low-prio vectors as direct-APIC ones. Since the change would be
>> at best benign here, it is being omitted.
>>
>> Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> Release-Acked-by: Oleksii Kurochko<oleksii.kurochko@gmail.com>
>> ---
>> This is an alternative proposal to
>> https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
>>
>> Should we keep hpet_msi_set_affinity() at all? We'd better not have the
>> generic IRQ subsystem play with our IRQs' affinities ... (If so, this
>> likely would want to be a separate patch, though.)
>
> I think that needs to become a no-op, with possibly an ASSERT? Is it
> possibly for dom0 to try to balance this IRQ? I would think not.
I'd consider it an error if that was possible. But then the same goes for
other Xen-internal IRQs, like the IOMMU ones. They all implement a
.set_affinity hook ...
>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
>> {
>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
>> + struct msi_msg msg = ch->msi.msg;
>>
>> ASSERT(!local_irq_is_enabled());
>> spin_lock(&desc->lock);
>> - hpet_msi_mask(desc);
>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
>> - hpet_msi_unmask(desc);
>> +
>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
>> +
>> + /*
>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
>> + * actual update below (either of the IRTE or of [just] message address;
>> + * with interrupt remapping message address/data don't change) now being
>> + * atomic, we can avoid masking the IRQ around the update. As a result
>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
>> + * keeps setting the new deadline only afterwards).
>> + */
>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
>> +
>> spin_unlock(&desc->lock);
>>
>> - spin_unlock(&ch->lock);
>> + msg.dest32 = cpu_physical_id(ch->cpu);
>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
>> + if ( msg.dest32 != ch->msi.msg.dest32 )
>> + {
>> + ch->msi.msg = msg;
>> +
>> + if ( iommu_intremap != iommu_intremap_off )
>> + {
>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
>>
>> - /* We may have missed an interrupt due to the temporary masking. */
>> - if ( ch->event_handler && ch->next_event < NOW() )
>> - ch->event_handler(ch);
>> + ASSERT(rc <= 0);
>> + if ( rc > 0 )
>> + {
>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>> + ASSERT(msg.address_lo ==
>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>> + }
>
> The sequence of asserts seem wrong here, the asserts inside of the rc
> > 0 check will never trigger, because there's an ASSERT(rc <= 0)
> ahead of them?
Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
we ought to check (and presumably fail on) data or address having changed.
Whereas when we get back 0, we're told "no change" anyway, and hence
checking isn't even needed. Did I misunderstand the purpose of the zero
vs positive return value here?
Of course I could switch to using "rc >= 0" anyway; I actually had it that
way first, but then decided the extra checks would be redundant in the 0
case.
>> + }
>> + else
>> + hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
>
> If you avoid the HPET register update here you possibly need to make
> sure that both fields are unconditionally written on the first call
> after resume from suspension. hpet_resume() needs to somehow taint
> the channels to signal that a re-write of the address and data fields
> is mandatory regardless of what iommu_update_ire_from_msi() has
> returned.
hpet_broadcast_resume() calls __hpet_setup_msi_irq() (and hence
hpet_msi_write()), which I thought is enough?
Jan
On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
> On 20.10.2025 18:22, Roger Pau Monné wrote:
> > On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
> >> Using dynamically allocated / maintained vectors has several downsides:
> >> - possible nesting of IRQs due to the effects of IRQ migration,
> >> - reduction of vectors available for devices,
> >> - IRQs not moving as intended if there's shortage of vectors,
> >> - higher runtime overhead.
> >>
> >> As the vector also doesn't need to be of any priority (first and foremost
> >> it really shouldn't be of higher or same priority as the timer IRQ, as
> >> that raises TIMER_SOFTIRQ anyway), avoid any "ordinary" vectors altogther
> >> and use a vector from the 0x10...0x1f exception vector space. Exception vs
> >> interrupt can easily be distinguished by checking for the presence of an
> >> error code.
> >>
> >> With a fixed vector, less updating is now necessary in
> >> set_channel_irq_affinity(); in particular channels don't need transiently
> >> masking anymore, as the necessary update is now atomic. To fully leverage
> >> this, however, we want to stop using hpet_msi_set_affinity() there. With
> >> the transient masking dropped, we're no longer at risk of missing events.
> >>
> >> In principle a change to setup_vector_irq() would be necessary, but only
> >> if we used low-prio vectors as direct-APIC ones. Since the change would be
> >> at best benign here, it is being omitted.
> >>
> >> Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> Release-Acked-by: Oleksii Kurochko<oleksii.kurochko@gmail.com>
> >> ---
> >> This is an alternative proposal to
> >> https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
> >>
> >> Should we keep hpet_msi_set_affinity() at all? We'd better not have the
> >> generic IRQ subsystem play with our IRQs' affinities ... (If so, this
> >> likely would want to be a separate patch, though.)
> >
> > I think that needs to become a no-op, with possibly an ASSERT? Is it
> > possibly for dom0 to try to balance this IRQ? I would think not.
>
> I'd consider it an error if that was possible. But then the same goes for
> other Xen-internal IRQs, like the IOMMU ones. They all implement a
> .set_affinity hook ...
We need such hook for fixup_irqs() at least, so that interrupts can be
evacuated when an AP goes offline. However movement of Xen owned IRQs
should be limited to Xen (if it's not already).
> >> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
> >> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
> >> {
> >> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
> >> + struct msi_msg msg = ch->msi.msg;
> >>
> >> ASSERT(!local_irq_is_enabled());
> >> spin_lock(&desc->lock);
> >> - hpet_msi_mask(desc);
> >> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
> >> - hpet_msi_unmask(desc);
> >> +
> >> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
> >> +
> >> + /*
> >> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
> >> + * actual update below (either of the IRTE or of [just] message address;
> >> + * with interrupt remapping message address/data don't change) now being
> >> + * atomic, we can avoid masking the IRQ around the update. As a result
> >> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
> >> + * keeps setting the new deadline only afterwards).
> >> + */
> >> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
> >> +
> >> spin_unlock(&desc->lock);
> >>
> >> - spin_unlock(&ch->lock);
> >> + msg.dest32 = cpu_physical_id(ch->cpu);
> >> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
> >> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
> >> + if ( msg.dest32 != ch->msi.msg.dest32 )
> >> + {
> >> + ch->msi.msg = msg;
> >> +
> >> + if ( iommu_intremap != iommu_intremap_off )
> >> + {
> >> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
> >>
> >> - /* We may have missed an interrupt due to the temporary masking. */
> >> - if ( ch->event_handler && ch->next_event < NOW() )
> >> - ch->event_handler(ch);
> >> + ASSERT(rc <= 0);
> >> + if ( rc > 0 )
> >> + {
> >> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> >> + ASSERT(msg.address_lo ==
> >> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> >> + }
> >
> > The sequence of asserts seem wrong here, the asserts inside of the rc
> > > 0 check will never trigger, because there's an ASSERT(rc <= 0)
> > ahead of them?
>
> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
> we ought to check (and presumably fail on) data or address having changed.
Right, but the ASSERT(rc <= 0) will prevent reaching any of the
followup ASSERTs if rc == 1? IOW, we possibly want:
if ( rc > 0 )
{
dprintk(XENLOG_ERR,
"Unexpected HPET MSI setup returned: data: %#x address: %#lx expected data %#x address %#lx\n",
msg.data, msg.address,
ch->msi.msg.data, ch->msi.msg.address);
ASSERT_UNREACHABLE();
hpet_msi_mask(desc);
hpet_write32(msg.data, HPET_Tn_ROUTE(ch->idx));
hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
hpet_msi_unmask(desc);
}
ASSERT(!rc);
I'm unsure about attempting to propagate the returned values on release
builds, I guess it's slightly better than possibly using an outdated
RTE entry? Albeit this should never happen.
Also, should the desc->arch.cpu_mask update only be done after the MSI
fields have been correctly updated, so that in case of failure of
iommu_update_ire_from_msi() we could return early from the function
and avoid changing desc->arch.cpu_mask?
> Whereas when we get back 0, we're told "no change" anyway, and hence
> checking isn't even needed. Did I misunderstand the purpose of the zero
> vs positive return value here?
>
> Of could I could switch to using "rc >= 0" anyway; I actually had it that
> way first, but then decided the extra checks would be redundant in the 0
> case.
>
> >> + }
> >> + else
> >> + hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
> >
> > If you avoid the HPET register update here you possibly need to make
> > sure that both fields are unconditionally written on the first call
> > after resume from suspension. hpet_resume() needs to somehow taint
> > the channels to signal that a re-write of the address and data fields
> > is mandatory regardless of what iommu_update_ire_from_msi() has
> > returned.
>
> hpet_broadcast_resume() calls __hpet_setup_msi_irq() (and hence
> hpet_msi_write()), which I thought is enough?
Oh, sorry, I was looking at hpet_resume(), not
hpet_broadcast_resume().
Thanks, Roger.
On 21.10.2025 15:49, Roger Pau Monné wrote:
> On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
>> On 20.10.2025 18:22, Roger Pau Monné wrote:
>>> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
>>>> Using dynamically allocated / maintained vectors has several downsides:
>>>> - possible nesting of IRQs due to the effects of IRQ migration,
>>>> - reduction of vectors available for devices,
>>>> - IRQs not moving as intended if there's shortage of vectors,
>>>> - higher runtime overhead.
>>>>
>>>> As the vector also doesn't need to be of any priority (first and foremost
>>>> it really shouldn't be of higher or same priority as the timer IRQ, as
>>>> that raises TIMER_SOFTIRQ anyway), avoid any "ordinary" vectors altogther
>>>> and use a vector from the 0x10...0x1f exception vector space. Exception vs
>>>> interrupt can easily be distinguished by checking for the presence of an
>>>> error code.
>>>>
>>>> With a fixed vector, less updating is now necessary in
>>>> set_channel_irq_affinity(); in particular channels don't need transiently
>>>> masking anymore, as the necessary update is now atomic. To fully leverage
>>>> this, however, we want to stop using hpet_msi_set_affinity() there. With
>>>> the transient masking dropped, we're no longer at risk of missing events.
>>>>
>>>> In principle a change to setup_vector_irq() would be necessary, but only
>>>> if we used low-prio vectors as direct-APIC ones. Since the change would be
>>>> at best benign here, it is being omitted.
>>>>
>>>> Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>>> Release-Acked-by: Oleksii Kurochko<oleksii.kurochko@gmail.com>
>>>> ---
>>>> This is an alternative proposal to
>>>> https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
>>>>
>>>> Should we keep hpet_msi_set_affinity() at all? We'd better not have the
>>>> generic IRQ subsystem play with our IRQs' affinities ... (If so, this
>>>> likely would want to be a separate patch, though.)
>>>
>>> I think that needs to become a no-op, with possibly an ASSERT? Is it
>>> possibly for dom0 to try to balance this IRQ? I would think not.
>>
>> I'd consider it an error if that was possible. But then the same goes for
>> other Xen-internal IRQs, like the IOMMU ones. They all implement a
>> .set_affinity hook ...
>
> We need such hook for fixup_irqs() at least, so that interrupts can be
> evacuated when an AP goes offline.
Hmm, yes. Just not here.
>>>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
>>>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
>>>> {
>>>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
>>>> + struct msi_msg msg = ch->msi.msg;
>>>>
>>>> ASSERT(!local_irq_is_enabled());
>>>> spin_lock(&desc->lock);
>>>> - hpet_msi_mask(desc);
>>>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
>>>> - hpet_msi_unmask(desc);
>>>> +
>>>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
>>>> +
>>>> + /*
>>>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
>>>> + * actual update below (either of the IRTE or of [just] message address;
>>>> + * with interrupt remapping message address/data don't change) now being
>>>> + * atomic, we can avoid masking the IRQ around the update. As a result
>>>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
>>>> + * keeps setting the new deadline only afterwards).
>>>> + */
>>>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
>>>> +
>>>> spin_unlock(&desc->lock);
>>>>
>>>> - spin_unlock(&ch->lock);
>>>> + msg.dest32 = cpu_physical_id(ch->cpu);
>>>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
>>>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
>>>> + if ( msg.dest32 != ch->msi.msg.dest32 )
>>>> + {
>>>> + ch->msi.msg = msg;
>>>> +
>>>> + if ( iommu_intremap != iommu_intremap_off )
>>>> + {
>>>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
>>>>
>>>> - /* We may have missed an interrupt due to the temporary masking. */
>>>> - if ( ch->event_handler && ch->next_event < NOW() )
>>>> - ch->event_handler(ch);
>>>> + ASSERT(rc <= 0);
>>>> + if ( rc > 0 )
>>>> + {
>>>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>>>> + ASSERT(msg.address_lo ==
>>>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>>>> + }
>>>
>>> The sequence of asserts seem wrong here, the asserts inside of the rc
>>>> 0 check will never trigger, because there's an ASSERT(rc <= 0)
>>> ahead of them?
>>
>> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
>> we ought to check (and presumably fail on) data or address having changed.
>
> Right, but the ASSERT(rc <= 0) will prevent reaching any of the
> followup ASSERTs if rc == 1?
Which is no problem, as we'd be dead already anyway if the first assertion
triggered. Nevertheless I've switched the if() to >= 0 (which then pointed
out a necessary change in AMD IOMMU code).
> IOW, we possibly want:
>
> if ( rc > 0 )
> {
> dprintk(XENLOG_ERR,
> "Unexpected HPET MSI setup returned: data: %#x address: %#lx expected data %#x address %#lx\n",
> msg.data, msg.address,
> ch->msi.msg.data, ch->msi.msg.address);
> ASSERT_UNREACHABLE();
> hpet_msi_mask(desc);
> hpet_write32(msg.data, HPET_Tn_ROUTE(ch->idx));
> hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
> hpet_msi_unmask(desc);
> }
> ASSERT(!rc);
To be honest, for my taste this goes too far as to what follows an
ASSERT_UNREACHABLE().
> I'm unsure about attempting to propagate the returned values on release
> builds, I guess it's slightly better than possibly using an outdated
> RTE entry? Albeit this should never happen.
Yes to the last remark; I don't actually see what you would want to do
with the propagated return value.
> Also, should the desc->arch.cpu_mask update only be done after the MSI
> fields have correctly updated, so that in case of failure of
> > iommu_update_ire_from_msi() we could return early from the function
> and avoid changing desc->arch.cpu_mask?
Hmm, yes, I could do that, but then also in hpet_msi_set_affinity().
However, as this needs doing under the IRQ descriptor lock, I'd have to
either extend the locked region here (again), or re-acquire the lock
later. Neither looks very attractive to me.
Jan
On Wed, Oct 22, 2025 at 11:21:15AM +0200, Jan Beulich wrote:
> On 21.10.2025 15:49, Roger Pau Monné wrote:
> > On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
> >> On 20.10.2025 18:22, Roger Pau Monné wrote:
> >>> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
> >>>> Using dynamically allocated / maintained vectors has several downsides:
> >>>> - possible nesting of IRQs due to the effects of IRQ migration,
> >>>> - reduction of vectors available for devices,
> >>>> - IRQs not moving as intended if there's shortage of vectors,
> >>>> - higher runtime overhead.
> >>>>
> >>>> As the vector also doesn't need to be of any priority (first and foremost
> >>>> it really shouldn't be of higher or same priority as the timer IRQ, as
> >>>> that raises TIMER_SOFTIRQ anyway), avoid any "ordinary" vectors altogether
> >>>> and use a vector from the 0x10...0x1f exception vector space. Exception vs
> >>>> interrupt can easily be distinguished by checking for the presence of an
> >>>> error code.
> >>>>
> >>>> With a fixed vector, less updating is now necessary in
> >>>> set_channel_irq_affinity(); in particular channels don't need transiently
> >>>> masking anymore, as the necessary update is now atomic. To fully leverage
> >>>> this, however, we want to stop using hpet_msi_set_affinity() there. With
> >>>> the transient masking dropped, we're no longer at risk of missing events.
> >>>>
> >>>> In principle a change to setup_vector_irq() would be necessary, but only
> >>>> if we used low-prio vectors as direct-APIC ones. Since the change would be
> >>>> at best benign here, it is being omitted.
> >>>>
> >>>> Fixes: 996576b965cc ("xen: allow up to 16383 cpus")
> >>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >>>> Release-Acked-by: Oleksii Kurochko<oleksii.kurochko@gmail.com>
> >>>> ---
> >>>> This is an alternative proposal to
> >>>> https://lists.xen.org/archives/html/xen-devel/2014-03/msg00399.html.
> >>>>
> >>>> Should we keep hpet_msi_set_affinity() at all? We'd better not have the
> >>>> generic IRQ subsystem play with our IRQs' affinities ... (If so, this
> >>>> likely would want to be a separate patch, though.)
> >>>
> >>> I think that needs to become a no-op, with possibly an ASSERT? Is it
> >>> possible for dom0 to try to balance this IRQ? I would think not.
> >>
> >> I'd consider it an error if that was possible. But then the same goes for
> >> other Xen-internal IRQs, like the IOMMU ones. They all implement a
> >> .set_affinity hook ...
> >
> > We need such hook for fixup_irqs() at least, so that interrupts can be
> > evacuated when an AP goes offline.
>
> Hmm, yes. Just not here.
>
> >>>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
> >>>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
> >>>> {
> >>>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
> >>>> + struct msi_msg msg = ch->msi.msg;
> >>>>
> >>>> ASSERT(!local_irq_is_enabled());
> >>>> spin_lock(&desc->lock);
> >>>> - hpet_msi_mask(desc);
> >>>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
> >>>> - hpet_msi_unmask(desc);
> >>>> +
> >>>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
> >>>> +
> >>>> + /*
> >>>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
> >>>> + * actual update below (either of the IRTE or of [just] message address;
> >>>> + * with interrupt remapping message address/data don't change) now being
> >>>> + * atomic, we can avoid masking the IRQ around the update. As a result
> >>>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
> >>>> + * keeps setting the new deadline only afterwards).
> >>>> + */
> >>>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
> >>>> +
> >>>> spin_unlock(&desc->lock);
> >>>>
> >>>> - spin_unlock(&ch->lock);
> >>>> + msg.dest32 = cpu_physical_id(ch->cpu);
> >>>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
> >>>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
> >>>> + if ( msg.dest32 != ch->msi.msg.dest32 )
> >>>> + {
> >>>> + ch->msi.msg = msg;
> >>>> +
> >>>> + if ( iommu_intremap != iommu_intremap_off )
> >>>> + {
> >>>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
> >>>>
> >>>> - /* We may have missed an interrupt due to the temporary masking. */
> >>>> - if ( ch->event_handler && ch->next_event < NOW() )
> >>>> - ch->event_handler(ch);
> >>>> + ASSERT(rc <= 0);
> >>>> + if ( rc > 0 )
> >>>> + {
> >>>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> >>>> + ASSERT(msg.address_lo ==
> >>>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> >>>> + }
> >>>
> >>> The sequence of asserts seem wrong here, the asserts inside of the rc
> >>>> 0 check will never trigger, because there's an ASSERT(rc <= 0)
> >>> ahead of them?
> >>
> >> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
> >> we ought to check (and presumably fail on) data or address having changed.
> >
> > Right, but the ASSERT(rc <= 0) will prevent reaching any of the
> > followup ASSERTs if rc == 1?
>
> Which is no problem, as we'd be dead already anyway if the first assertion
> triggered. Nevertheless I've switched the if() to >= 0 (which then pointed
> out a necessary change in AMD IOMMU code).
Right, so an adjusted if condition plus an ASSERT_UNREACHABLE() at
the end of the if code block?
> > IOW, we possibly want:
> >
> > if ( rc > 0 )
> > {
> > dprintk(XENLOG_ERR,
> > "Unexpected HPET MSI setup returned: data: %#x address: %#lx expected data %#x address %#lx\n",
> > msg.data, msg.address,
> > ch->msi.msg.data, ch->msi.msg.address);
> > ASSERT_UNREACHABLE();
> > hpet_msi_mask(desc);
> > hpet_write32(msg.data, HPET_Tn_ROUTE(ch->idx));
> > hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
> > hpet_msi_unmask(desc);
> > }
> > ASSERT(!rc);
>
> To be honest, for my taste this goes too far as to what follows an
> ASSERT_UNREACHABLE().
I can understand that. It's the best way I've come up with attempting
to recover from a possible error in the release case, but I don't
particularly like it either.
> > I'm unsure about attempting to propagate the returned values on release
> > builds, I guess it's slightly better than possibly using an outdated
> > RTE entry? Albeit this should never happen.
>
> Yes to the last remark; I don't actually see what you would want to do
> with the propagated return value.
OK, I can see this not being clear. By propagate here I mean
propagate to the hardware registers, not to the function caller.
> > Also, should the desc->arch.cpu_mask update only be done after the MSI
> > fields have correctly updated, so that in case of failure of
> > iommu_update_ire_from_msi() we could return early from the function
> > and avoid changing desc->arch.cpu_mask?
>
> Hmm, yes, I could do that, but then also in hpet_msi_set_affinity().
> However, as this needs doing under the IRQ descriptor lock, I'd have to
> either extend the locked region here (again), or re-acquire the lock
> later. Neither looks very attractive to me.
Hm, I guess given the point in the release we can leave it as-is. It
would be nice, but this change is big enough as it is.
Thanks, Roger.
On 23.10.2025 10:39, Roger Pau Monné wrote:
> On Wed, Oct 22, 2025 at 11:21:15AM +0200, Jan Beulich wrote:
>> On 21.10.2025 15:49, Roger Pau Monné wrote:
>>> On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
>>>> On 20.10.2025 18:22, Roger Pau Monné wrote:
>>>>> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
>>>>>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
>>>>>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
>>>>>> {
>>>>>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
>>>>>> + struct msi_msg msg = ch->msi.msg;
>>>>>>
>>>>>> ASSERT(!local_irq_is_enabled());
>>>>>> spin_lock(&desc->lock);
>>>>>> - hpet_msi_mask(desc);
>>>>>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
>>>>>> - hpet_msi_unmask(desc);
>>>>>> +
>>>>>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
>>>>>> +
>>>>>> + /*
>>>>>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
>>>>>> + * actual update below (either of the IRTE or of [just] message address;
>>>>>> + * with interrupt remapping message address/data don't change) now being
>>>>>> + * atomic, we can avoid masking the IRQ around the update. As a result
>>>>>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
>>>>>> + * keeps setting the new deadline only afterwards).
>>>>>> + */
>>>>>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
>>>>>> +
>>>>>> spin_unlock(&desc->lock);
>>>>>>
>>>>>> - spin_unlock(&ch->lock);
>>>>>> + msg.dest32 = cpu_physical_id(ch->cpu);
>>>>>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
>>>>>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
>>>>>> + if ( msg.dest32 != ch->msi.msg.dest32 )
>>>>>> + {
>>>>>> + ch->msi.msg = msg;
>>>>>> +
>>>>>> + if ( iommu_intremap != iommu_intremap_off )
>>>>>> + {
>>>>>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
>>>>>>
>>>>>> - /* We may have missed an interrupt due to the temporary masking. */
>>>>>> - if ( ch->event_handler && ch->next_event < NOW() )
>>>>>> - ch->event_handler(ch);
>>>>>> + ASSERT(rc <= 0);
>>>>>> + if ( rc > 0 )
>>>>>> + {
>>>>>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>>>>>> + ASSERT(msg.address_lo ==
>>>>>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>>>>>> + }
>>>>>
>>>>> The sequence of asserts seem wrong here, the asserts inside of the rc
>>>>>> 0 check will never trigger, because there's an ASSERT(rc <= 0)
>>>>> ahead of them?
>>>>
>>>> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
>>>> we ought to check (and presumably fail on) data or address having changed.
>>>
>>> Right, but the ASSERT(rc <= 0) will prevent reaching any of the
>>> followup ASSERTs if rc == 1?
>>
>> Which is no problem, as we'd be dead already anyway if the first assertion
>> triggered. Nevertheless I've switched the if() to >= 0 (which then pointed
>> out a necessary change in AMD IOMMU code).
>
> Right, so an adjusted if condition plus an ASSERT_UNREACHABLE() at
> the end of the if code block?
That is, instead of
ASSERT(rc <= 0);
if ( rc >= 0 )
{
ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
ASSERT(msg.address_lo ==
hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
}
you'd prefer
if ( rc >= 0 )
{
ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
ASSERT(msg.address_lo ==
hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
ASSERT_UNREACHABLE();
}
? That's wrong though (for rc == 0), i.e. I fear I don't see what you mean.
>>> IOW, we possibly want:
>>>
>>> if ( rc > 0 )
>>> {
>>> dprintk(XENLOG_ERR,
>>> "Unexpected HPET MSI setup returned: data: %#x address: %#lx expected data %#x address %#lx\n",
>>> msg.data, msg.address,
>>> ch->msi.msg.data, ch->msi.msg.address);
>>> ASSERT_UNREACHABLE();
>>> hpet_msi_mask(desc);
>>> hpet_write32(msg.data, HPET_Tn_ROUTE(ch->idx));
>>> hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
>>> hpet_msi_unmask(desc);
>>> }
>>> ASSERT(!rc);
>>
>> To be honest, for my taste this goes too far as to what follows an
>> ASSERT_UNREACHABLE().
>
> I can understand that. It's the best way I've come up with attempting
> to recover from a possible error in the release case, but I don't
> particularly like it either.
>
>>> I'm unsure about attempting to propagate the returned values on release
>>> builds, I guess it's slightly better than possibly using an outdated
>>> RTE entry? Albeit this should never happen.
>>
>> Yes to the last remark; I don't actually see what you would want to do
>> with the propagated return value.
>
> OK, I can see this not being clear. By propagate here I mean
> propagate to the hardware registers, not to the function caller.
I.e. you still think adding the two hpet_write32() is going to be useful?
The mask/unmask, as I did say in another reply to your comments, isn't
useful here anyway (for already not being atomic), so I wouldn't see much
sense in having them. Plus of course we'd want to avoid the writes on
release builds if the values actually match, i.e. the construct would then
rather end up as two if-mismatch-then-write-else-assert-unreachable ones.
Just to mention - apart from this I have a working v3 ready to post.
Jan
On Thu, Oct 23, 2025 at 12:37:22PM +0200, Jan Beulich wrote:
> On 23.10.2025 10:39, Roger Pau Monné wrote:
> > On Wed, Oct 22, 2025 at 11:21:15AM +0200, Jan Beulich wrote:
> >> On 21.10.2025 15:49, Roger Pau Monné wrote:
> >>> On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
> >>>> On 20.10.2025 18:22, Roger Pau Monné wrote:
> >>>>> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
> >>>>>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
> >>>>>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
> >>>>>> {
> >>>>>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
> >>>>>> + struct msi_msg msg = ch->msi.msg;
> >>>>>>
> >>>>>> ASSERT(!local_irq_is_enabled());
> >>>>>> spin_lock(&desc->lock);
> >>>>>> - hpet_msi_mask(desc);
> >>>>>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
> >>>>>> - hpet_msi_unmask(desc);
> >>>>>> +
> >>>>>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
> >>>>>> +
> >>>>>> + /*
> >>>>>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
> >>>>>> + * actual update below (either of the IRTE or of [just] message address;
> >>>>>> + * with interrupt remapping message address/data don't change) now being
> >>>>>> + * atomic, we can avoid masking the IRQ around the update. As a result
> >>>>>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
> >>>>>> + * keeps setting the new deadline only afterwards).
> >>>>>> + */
> >>>>>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
> >>>>>> +
> >>>>>> spin_unlock(&desc->lock);
> >>>>>>
> >>>>>> - spin_unlock(&ch->lock);
> >>>>>> + msg.dest32 = cpu_physical_id(ch->cpu);
> >>>>>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
> >>>>>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
> >>>>>> + if ( msg.dest32 != ch->msi.msg.dest32 )
> >>>>>> + {
> >>>>>> + ch->msi.msg = msg;
> >>>>>> +
> >>>>>> + if ( iommu_intremap != iommu_intremap_off )
> >>>>>> + {
> >>>>>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
> >>>>>>
> >>>>>> - /* We may have missed an interrupt due to the temporary masking. */
> >>>>>> - if ( ch->event_handler && ch->next_event < NOW() )
> >>>>>> - ch->event_handler(ch);
> >>>>>> + ASSERT(rc <= 0);
> >>>>>> + if ( rc > 0 )
> >>>>>> + {
> >>>>>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> >>>>>> + ASSERT(msg.address_lo ==
> >>>>>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> >>>>>> + }
> >>>>>
> >>>>> The sequence of asserts seem wrong here, the asserts inside of the rc
> >>>>>> 0 check will never trigger, because there's an ASSERT(rc <= 0)
> >>>>> ahead of them?
> >>>>
> >>>> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
> >>>> we ought to check (and presumably fail on) data or address having changed.
> >>>
> >>> Right, but the ASSERT(rc <= 0) will prevent reaching any of the
> >>> followup ASSERTs if rc == 1?
> >>
> >> Which is no problem, as we'd be dead already anyway if the first assertion
> >> triggered. Nevertheless I've switched the if() to >= 0 (which then pointed
> >> out a necessary change in AMD IOMMU code).
> >
> > Right, so an adjusted if condition plus an ASSERT_UNREACHABLE() at
> > the end of the if code block?
>
> That is, instead of
>
> ASSERT(rc <= 0);
> if ( rc >= 0 )
> {
> ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> ASSERT(msg.address_lo ==
> hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> }
>
> you'd prefer
>
> if ( rc >= 0 )
> {
> ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> ASSERT(msg.address_lo ==
> hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> ASSERT_UNREACHABLE();
> }
>
> ? That's wrong though (for rc == 0), i.e. I fear I don't see what you mean.
Oh, I see, sorry for the suggestions, it's indeed wrong. FTAOD, what
do you plan to use then here?
You could replace the ASSERT_UNREACHABLE() for ASSERT(rc == 0) in my
suggestion I think?
Or maybe just do:
ASSERT(rc <= 0);
if ( !rc )
{
ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
ASSERT(msg.address_lo ==
hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
}
Was your original intention with those checks to ensure that for the
rc == 0 case the message fields remained unchanged?
> >>> IOW, we possibly want:
> >>>
> >>> if ( rc > 0 )
> >>> {
> >>> dprintk(XENLOG_ERR,
> >>> "Unexpected HPET MSI setup returned: data: %#x address: %#lx expected data %#x address %#lx\n",
> >>> msg.data, msg.address,
> >>> ch->msi.msg.data, ch->msi.msg.address);
> >>> ASSERT_UNREACHABLE();
> >>> hpet_msi_mask(desc);
> >>> hpet_write32(msg.data, HPET_Tn_ROUTE(ch->idx));
> >>> hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
> >>> hpet_msi_unmask(desc);
> >>> }
> >>> ASSERT(!rc);
> >>
> >> To be honest, for my taste this goes too far as to what follows an
> >> ASSERT_UNREACHABLE().
> >
> > I can understand that. It's the best way I've come up with attempting
> > to recover from a possible error in the release case, but I don't
> > particularly like it either.
> >
> >>> I'm unsure about attempting to propagate the returned values on release
> >>> builds, I guess it's slightly better than possibly using an outdated
> >>> RTE entry? Albeit this should never happen.
> >>
> >> Yes to the last remark; I don't actually see what you would want to do
> >> with the propagated return value.
> >
> > OK, I can see this not being clear. By propagate here I mean
> > propagate to the hardware registers, not to the function caller.
>
> I.e. you still think adding the two hpet_write32() is going to be useful?
> The mask/unmask, as I did say in another reply to your comments, isn't
> useful here anyway (for already not being atomic), so I wouldn't see much
> sense in having them.
Right, for it to be correct the masking would need to include the
iommu_update_ire_from_msi() call also.
> Plus of course we'd want to avoid the writes on
> release builds if the values actually match, i.e. the construct would then
> rather end up as two if-mismatch-then-write-else-assert-unreachable ones.
My concern would be that after this change we won't cope anymore with
iommu_update_ire_from_msi() returning a value > 1. Which might be
fine, as it's in theory not possible, but seems less robust than it
was before the change. I guess it's the price to pay for avoiding the
masking (unless you have other options).
Looking at the existing code, this is likely no worse than
iommu_update_ire_from_msi() returning an error, and that case is
already silently ignored by hpet_msi_set_affinity(). So I think
silently ignoring > 0 is not that different, and doesn't make the
current handling much worse. It would be nice to handle those better,
but can be done separately.
Thanks, Roger.
On 23.10.2025 14:49, Roger Pau Monné wrote:
> On Thu, Oct 23, 2025 at 12:37:22PM +0200, Jan Beulich wrote:
>> On 23.10.2025 10:39, Roger Pau Monné wrote:
>>> On Wed, Oct 22, 2025 at 11:21:15AM +0200, Jan Beulich wrote:
>>>> On 21.10.2025 15:49, Roger Pau Monné wrote:
>>>>> On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
>>>>>> On 20.10.2025 18:22, Roger Pau Monné wrote:
>>>>>>> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
>>>>>>>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
>>>>>>>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
>>>>>>>> {
>>>>>>>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
>>>>>>>> + struct msi_msg msg = ch->msi.msg;
>>>>>>>>
>>>>>>>> ASSERT(!local_irq_is_enabled());
>>>>>>>> spin_lock(&desc->lock);
>>>>>>>> - hpet_msi_mask(desc);
>>>>>>>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
>>>>>>>> - hpet_msi_unmask(desc);
>>>>>>>> +
>>>>>>>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
>>>>>>>> +
>>>>>>>> + /*
>>>>>>>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
>>>>>>>> + * actual update below (either of the IRTE or of [just] message address;
>>>>>>>> + * with interrupt remapping message address/data don't change) now being
>>>>>>>> + * atomic, we can avoid masking the IRQ around the update. As a result
>>>>>>>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
>>>>>>>> + * keeps setting the new deadline only afterwards).
>>>>>>>> + */
>>>>>>>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
>>>>>>>> +
>>>>>>>> spin_unlock(&desc->lock);
>>>>>>>>
>>>>>>>> - spin_unlock(&ch->lock);
>>>>>>>> + msg.dest32 = cpu_physical_id(ch->cpu);
>>>>>>>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
>>>>>>>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
>>>>>>>> + if ( msg.dest32 != ch->msi.msg.dest32 )
>>>>>>>> + {
>>>>>>>> + ch->msi.msg = msg;
>>>>>>>> +
>>>>>>>> + if ( iommu_intremap != iommu_intremap_off )
>>>>>>>> + {
>>>>>>>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
>>>>>>>>
>>>>>>>> - /* We may have missed an interrupt due to the temporary masking. */
>>>>>>>> - if ( ch->event_handler && ch->next_event < NOW() )
>>>>>>>> - ch->event_handler(ch);
>>>>>>>> + ASSERT(rc <= 0);
>>>>>>>> + if ( rc > 0 )
>>>>>>>> + {
>>>>>>>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>>>>>>>> + ASSERT(msg.address_lo ==
>>>>>>>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>>>>>>>> + }
>>>>>>>
>>>>>>> The sequence of asserts seem wrong here, the asserts inside of the rc
>>>>>>>> 0 check will never trigger, because there's an ASSERT(rc <= 0)
>>>>>>> ahead of them?
>>>>>>
>>>>>> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
>>>>>> we ought to check (and presumably fail on) data or address having changed.
>>>>>
>>>>> Right, but the ASSERT(rc <= 0) will prevent reaching any of the
>>>>> followup ASSERTs if rc == 1?
>>>>
>>>> Which is no problem, as we'd be dead already anyway if the first assertion
>>>> triggered. Nevertheless I've switched the if() to >= 0 (which then pointed
>>>> out a necessary change in AMD IOMMU code).
>>>
>>> Right, so an adjusted if condition plus an ASSERT_UNREACHABLE() at
>>> the end of the if code block?
>>
>> That is, instead of
>>
>> ASSERT(rc <= 0);
>> if ( rc >= 0 )
>> {
>> ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>> ASSERT(msg.address_lo ==
>> hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>> }
>>
>> you'd prefer
>>
>> if ( rc >= 0 )
>> {
>> ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>> ASSERT(msg.address_lo ==
>> hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>> ASSERT_UNREACHABLE();
>> }
>>
>> ? That's wrong though (for rc == 0), i.e. I fear I don't see what you mean.
>
> Oh, I see, sorry for the suggestions, it's indeed wrong. FTAOD, what
> do you plan to use then here?
The earlier of the two code fragments presented above.
> You could replace the ASSERT_UNREACHABLE() for ASSERT(rc == 0) in my
> suggestion I think?
>
> Or maybe just do:
>
> ASSERT(rc <= 0);
> if ( !rc )
> {
> ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
> ASSERT(msg.address_lo ==
> hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
> }
>
> Was your original intention with those checks to ensure that for the
> rc == 0 case the message fields remained unchanged?
Well, originally the condition was "rc > 0", mainly to document the expectations.
Now that I changed it to "rc >= 0" (helping to find an issue in AMD IOMMU code
that needed addressing), I'm indeed (also) after checking for "no change" in the
"rc == 0" case. Hence why I've ended up with the code fragment above.
I guess it might be best if I post v3 before leaving today, and we take it from
there.
Jan
On 22.10.2025 11:21, Jan Beulich wrote:
> On 21.10.2025 15:49, Roger Pau Monné wrote:
>> On Tue, Oct 21, 2025 at 08:42:13AM +0200, Jan Beulich wrote:
>>> On 20.10.2025 18:22, Roger Pau Monné wrote:
>>>> On Mon, Oct 20, 2025 at 01:18:34PM +0200, Jan Beulich wrote:
>>>>> @@ -476,19 +486,50 @@ static struct hpet_event_channel *hpet_g
>>>>> static void set_channel_irq_affinity(struct hpet_event_channel *ch)
>>>>> {
>>>>> struct irq_desc *desc = irq_to_desc(ch->msi.irq);
>>>>> + struct msi_msg msg = ch->msi.msg;
>>>>>
>>>>> ASSERT(!local_irq_is_enabled());
>>>>> spin_lock(&desc->lock);
>>>>> - hpet_msi_mask(desc);
>>>>> - hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
>>>>> - hpet_msi_unmask(desc);
>>>>> +
>>>>> + per_cpu(vector_irq, ch->cpu)[HPET_BROADCAST_VECTOR] = ch->msi.irq;
>>>>> +
>>>>> + /*
>>>>> + * Open-coding a reduced form of hpet_msi_set_affinity() here. With the
>>>>> + * actual update below (either of the IRTE or of [just] message address;
>>>>> + * with interrupt remapping message address/data don't change) now being
>>>>> + * atomic, we can avoid masking the IRQ around the update. As a result
>>>>> + * we're no longer at risk of missing IRQs (provided hpet_broadcast_enter()
>>>>> + * keeps setting the new deadline only afterwards).
>>>>> + */
>>>>> + cpumask_copy(desc->arch.cpu_mask, cpumask_of(ch->cpu));
>>>>> +
>>>>> spin_unlock(&desc->lock);
>>>>>
>>>>> - spin_unlock(&ch->lock);
>>>>> + msg.dest32 = cpu_physical_id(ch->cpu);
>>>>> + msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
>>>>> + msg.address_lo |= MSI_ADDR_DEST_ID(msg.dest32);
>>>>> + if ( msg.dest32 != ch->msi.msg.dest32 )
>>>>> + {
>>>>> + ch->msi.msg = msg;
>>>>> +
>>>>> + if ( iommu_intremap != iommu_intremap_off )
>>>>> + {
>>>>> + int rc = iommu_update_ire_from_msi(&ch->msi, &msg);
>>>>>
>>>>> - /* We may have missed an interrupt due to the temporary masking. */
>>>>> - if ( ch->event_handler && ch->next_event < NOW() )
>>>>> - ch->event_handler(ch);
>>>>> + ASSERT(rc <= 0);
>>>>> + if ( rc > 0 )
>>>>> + {
>>>>> + ASSERT(msg.data == hpet_read32(HPET_Tn_ROUTE(ch->idx)));
>>>>> + ASSERT(msg.address_lo ==
>>>>> + hpet_read32(HPET_Tn_ROUTE(ch->idx) + 4));
>>>>> + }
>>>>
>>>> The sequence of asserts seem wrong here, the asserts inside of the rc
>>>>> 0 check will never trigger, because there's an ASSERT(rc <= 0)
>>>> ahead of them?
>>>
>>> Hmm. My way of thinking was that if we get back 1 (which we shouldn't),
>>> we ought to check (and presumably fail on) data or address having changed.
>>
>> Right, but the ASSERT(rc <= 0) will prevent reaching any of the
>> followup ASSERTs if rc == 1?
>
> Which is no problem, as we'd be dead already anyway if the first assertion
> triggered. Nevertheless I've switched the if() to >= 0 (which then pointed
> out a necessary change in AMD IOMMU code).
>
>> IOW, we possibly want:
>>
>> if ( rc > 0 )
>> {
>> dprintk(XENLOG_ERR,
>> "Unexpected HPET MSI setup returned: data: %#x address: %#lx expected data %#x address %#lx\n",
>> msg.data, msg.address,
>> ch->msi.msg.data, ch->msi.msg.address);
>> ASSERT_UNREACHABLE();
>> hpet_msi_mask(desc);
>> hpet_write32(msg.data, HPET_Tn_ROUTE(ch->idx));
>> hpet_write32(msg.address_lo, HPET_Tn_ROUTE(ch->idx) + 4);
>> hpet_msi_unmask(desc);
>> }
>> ASSERT(!rc);
>
> To be honest, for my taste this goes too far as to what follows an
> ASSERT_UNREACHABLE().
And it's insufficient: If we suspected the need for a non-atomic update,
the channel would need disabling before the IOMMU update.
Jan
© 2016 - 2025 Red Hat, Inc.