It's only ever handle_hpet_broadcast() that is used as the event handler,
so the indirection through a function pointer isn't needed. While we now
don't enable IRQs right away, still play safe and convert the function
pointer to a boolean, to make sure no calls can occur too early.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
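
As an aside, the ordering hpet_broadcast_init() relies on stays the same:
fully initialise the channel, issue smp_wmb(), and only then set the flag
that e.g. set_channel_irq_affinity() tests. The standalone sketch below is
illustrative only (not Xen code): the fake_channel / fake_init() /
fake_interrupt() names are made up, and C11's atomic_thread_fence() merely
stands in for smp_wmb().

/*
 * Minimal, self-contained illustration of the publish pattern the patch
 * keeps: initialise everything, issue a write barrier, then set a plain
 * boolean.  A caller observing the flag still clear simply returns
 * instead of acting on a not-yet-set-up channel.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_channel {
    long next_event;          /* hypothetical payload */
    bool event_handler;       /* "channel is ready" flag, as in the patch */
};

static struct fake_channel ch;

static void fake_broadcast(struct fake_channel *c)
{
    printf("broadcast: next_event=%ld\n", c->next_event);
}

/* Mirrors the flag check at a call site such as set_channel_irq_affinity(). */
static void fake_interrupt(struct fake_channel *c)
{
    if ( !c->event_handler )  /* too early: initialisation not yet published */
        return;
    fake_broadcast(c);        /* direct call, no pointer indirection */
}

/* Mirrors the hpet_broadcast_init() ordering: fields, barrier, flag. */
static void fake_init(struct fake_channel *c)
{
    c->next_event = 42;
    atomic_thread_fence(memory_order_release);  /* smp_wmb() stand-in */
    c->event_handler = true;
}

int main(void)
{
    fake_interrupt(&ch);      /* before init: silently ignored */
    fake_init(&ch);
    fake_interrupt(&ch);      /* after init: broadcast runs */
    return 0;
}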
--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -40,7 +40,7 @@ struct hpet_event_channel
s_time_t next_event;
cpumask_var_t cpumask;
spinlock_t lock;
- void (*event_handler)(struct hpet_event_channel *ch);
+ bool event_handler;
unsigned int idx; /* physical channel idx */
unsigned int cpu; /* msi target */
@@ -194,7 +194,7 @@ static void evt_do_broadcast(cpumask_t *
cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}

-static void cf_check handle_hpet_broadcast(struct hpet_event_channel *ch)
+static void handle_hpet_broadcast(struct hpet_event_channel *ch)
{
cpumask_t mask;
s_time_t now, next_event;
@@ -254,7 +254,7 @@ static void cf_check hpet_interrupt_hand
if ( ch->cpu != cpu )
return;
- ch->event_handler(ch);
+ handle_hpet_broadcast(ch);
}
static void cf_check hpet_msi_unmask(struct irq_desc *desc)
@@ -515,7 +515,7 @@ static void set_channel_irq_affinity(str
/* We may have missed an interrupt due to the temporary masking. */
if ( ch->event_handler && ch->next_event < NOW() )
- ch->event_handler(ch);
+ handle_hpet_broadcast(ch);
}
static void hpet_attach_channel(unsigned int cpu,
@@ -643,7 +643,7 @@ void __init hpet_broadcast_init(void)
hpet_events[i].next_event = STIME_MAX;
spin_lock_init(&hpet_events[i].lock);
smp_wmb();
- hpet_events[i].event_handler = handle_hpet_broadcast;
+ hpet_events[i].event_handler = true;
hpet_events[i].msi.msi_attrib.maskbit = 1;
hpet_events[i].msi.msi_attrib.pos = MSI_TYPE_HPET;
@@ -794,7 +794,9 @@ int hpet_legacy_irq_tick(void)
(hpet_events->flags & (HPET_EVT_DISABLE|HPET_EVT_LEGACY)) !=
HPET_EVT_LEGACY )
return 0;
- hpet_events->event_handler(hpet_events);
+
+ handle_hpet_broadcast(hpet_events);
+
return 1;
}